/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_async_xchg_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_async_xchg_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_async_xchg_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_async_xchg_ctx *,
					    uint32_t, uint16_t);
static void lpfc_nvmet_wqfull_flush(struct lpfc_hba *, struct lpfc_queue *,
				    struct lpfc_async_xchg_ctx *);
static void lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *);

static void lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf);

static union lpfc_wqe128 lpfc_tsend_cmd_template;
static union lpfc_wqe128 lpfc_treceive_cmd_template;
static union lpfc_wqe128 lpfc_trsp_cmd_template;

/* Setup WQE templates for NVME IOs */
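/*
 * The three templates below (TSEND for read data, TRECEIVE for write data,
 * TRSP for status) are built once here and copied into a WQE at I/O time;
 * only the fields marked "variable" in the word-by-word comments are
 * rewritten per command, which keeps the hot path cheap. A rough sketch of
 * how a prep routine consumes one (illustrative only, not the exact code):
 *
 *	memcpy(wqe, &lpfc_tsend_cmd_template, sizeof(*wqe));
 *	bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
 *	... fill in the BDE, tags, and lengths ...
 */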
void
lpfc_nvmet_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* TSEND template */
	wqe = &lpfc_tsend_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is zero */

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 - wqe_ar is variable */
	bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_tsend.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_tsend.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_tsend.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
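	/*
	 * Note: ar (auto-rsp) defaults to set so the adapter can generate
	 * the good FCP_RSP itself once the TSEND data completes; the prep
	 * code is expected to clear it when an explicit TRSP will follow.
	 */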

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - wqes, xc is variable */
	bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com, LPFC_WQE_LENLOC_WORD12);

	/* Word 11 - sup, irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com, FCP_COMMAND_TSEND);
	bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_tsend.wqe_com, 0);

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is zero */

	/* TRECEIVE template */
	wqe = &lpfc_treceive_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 */
	wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

	/* Word 4 - relative_offset is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com, CMD_FCP_TRECEIVE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_treceive.wqe_com, PARM_REL_OFF);
	bf_set(wqe_class, &wqe->fcp_treceive.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_treceive.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag, rcvoxid is variable */

	/* Word 10 - xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com, LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com, FCP_COMMAND_TRECEIVE);
	bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 1);
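	/*
	 * Unlike TSEND/TRSP, pbde defaults to set for TRECEIVE: words 13-15
	 * can carry a copy of the first data BDE, saving an SGL fetch for
	 * the incoming write data; presumably the prep routine clears this
	 * when the PBDE optimization is disabled.
	 */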

	/* Word 12 - fcp_data_len is variable */

	/* Word 13, 14, 15 - PBDE is variable */

	/* TRSP template */
	wqe = &lpfc_trsp_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - response_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
	bf_set(wqe_pu, &wqe->fcp_trsp.wqe_com, PARM_UNUSED);
	bf_set(wqe_class, &wqe->fcp_trsp.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_trsp.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1); /* wqe_ar */

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 wqes, xc is variable */
	bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
	bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com, LPFC_WQE_LENLOC_WORD3);

	/* Word 11 irsp, irsplen is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com, FCP_COMMAND_TRSP);
	bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
	bf_set(wqe_pbde, &wqe->fcp_trsp.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
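/*
 * Walk the target-active exchange list and return the context whose XRI
 * matches, or NULL. No reference is taken on the returned context, so it
 * is only safe to use under whatever serialization keeps the entry on
 * t_active_ctx_list.
 */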
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_xri(struct lpfc_hba *phba, u16 xri)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}

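/* As above, but match on the OXID/SID pair taken from the FC header. */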
static struct lpfc_async_xchg_ctx *
lpfc_nvmet_get_ctx_for_oxid(struct lpfc_hba *phba, u16 oxid, u32 sid)
{
	struct lpfc_async_xchg_ctx *ctxp;
	unsigned long iflag;
	bool found = false;

	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_for_each_entry(ctxp, &phba->sli4_hba.t_active_ctx_list, list) {
		if (ctxp->oxid != oxid || ctxp->sid != sid)
			continue;

		found = true;
		break;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	if (found)
		return ctxp;

	return NULL;
}
#endif

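/*
 * Mark an exchange context for deferred release: set LPFC_NVME_CTX_RLS and
 * move the context from the active list to the ABTS list so the final free
 * happens only once the outstanding abort completes. Caller must hold
 * ctxp->ctxlock (enforced by the lockdep assert below).
 */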
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba,
			 struct lpfc_async_xchg_ctx *ctxp)
{
	lockdep_assert_held(&ctxp->ctxlock);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release oxid x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	if (ctxp->flag & LPFC_NVME_CTX_RLS)
		return;

	ctxp->flag |= LPFC_NVME_CTX_RLS;
	spin_lock(&phba->sli4_hba.t_active_list_lock);
	list_del(&ctxp->list);
	spin_unlock(&phba->sli4_hba.t_active_list_lock);
	spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
}

/**
 * __lpfc_nvme_xmt_ls_rsp_cmp - Generic completion handler for the
 *         transmission of an NVME LS response.
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. The function frees memory resources used for the command
 * used to send the NVME LS RSP.
 **/
void
__lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			   struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_async_xchg_ctx *axchg = cmdwqe->context2;
	struct nvmefc_ls_rsp *ls_rsp = &axchg->ls_rsp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	if (axchg->state != LPFC_NVME_STE_LS_RSP || axchg->entry_cnt != 2) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6410 NVMEx LS cmpl state mismatch IO x%x: "
				"%d %d\n",
				axchg->oxid, axchg->state, axchg->entry_cnt);
	}

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x result x%x\n",
			 axchg->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 NVMEx LS rsp cmpl: %d %d oxid x%x\n",
			status, result, axchg->oxid);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	ls_rsp->done(ls_rsp);
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6200 NVMEx LS rsp cmpl done status %d oxid x%x\n",
			status, axchg->oxid);
	kfree(axchg);
}

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function updates any states and statistics, then calls the
 * generic completion handler to free resources.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	if (!phba->targetport)
		goto finish;

	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (tgtp) {
		if (status) {
			atomic_inc(&tgtp->xmt_ls_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_ls_rsp_aborted);
			if (bf_get(lpfc_wcqe_c_xb, wcqe))
				atomic_inc(&tgtp->xmt_ls_rsp_xb_set);
		} else {
			atomic_inc(&tgtp->xmt_ls_rsp_cmpl);
		}
	}

finish:
	__lpfc_nvme_xmt_ls_rsp_cmp(phba, cmdwqe, wcqe);
}

/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: ctx buffer holding the NVMET exchange context to clean up
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_nvmet_ctx_info *infop;
	uint32_t size, oxid, sid;
	int cpu;
	unsigned long iflag;

	if (ctxp->state == LPFC_NVME_STE_FREE) {
		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6411 NVMET free, already free IO x%x: %d %d\n",
				ctxp->oxid, ctxp->state, ctxp->entry_cnt);
	}

	if (ctxp->rqb_buffer) {
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		nvmebuf = ctxp->rqb_buffer;
		/* check if freed in another path whilst acquiring lock */
		if (nvmebuf) {
			ctxp->rqb_buffer = NULL;
			if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
				ctxp->flag &= ~LPFC_NVME_CTX_REUSE_WQ;
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				nvmebuf->hrq->rqbp->rqb_free_buffer(phba,
								    nvmebuf);
			} else {
				spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
				/* repost */
				lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
			}
		} else {
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
		}
	}
	ctxp->state = LPFC_NVME_STE_FREE;

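	/*
	 * If an unsolicited FCP command is parked on the io_wait list (it
	 * arrived while no contexts were free), recycle this context
	 * straight into the oldest waiter instead of returning it to the
	 * per-CPU free pool.
	 */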
	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);

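		/* Re-initialize the context for the newly received command */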
		ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
		ctxp->wqeq = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVME_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		ctxp->rqb_buffer = (void *)nvmebuf;
		spin_lock_init(&ctxp->ctxlock);

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		/* NOTE: isr time stamp is stale when context is re-assigned */
		if (ctxp->ts_isr_cmd) {
			ctxp->ts_cmd_nvme = 0;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);

		/* Indicate that a replacement buffer has been posted */
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVME_CTX_REUSE_WQ;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
			atomic_inc(&tgtp->rcv_fcp_cmd_drop);
			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
					"6181 Unable to queue deferred work "
					"for oxid x%x. "
					"FCP Drop IO [x%x x%x x%x]\n",
					ctxp->oxid,
					atomic_read(&tgtp->rcv_fcp_cmd_in),
					atomic_read(&tgtp->rcv_fcp_cmd_out),
					atomic_read(&tgtp->xmt_fcp_release));

			spin_lock_irqsave(&ctxp->ctxlock, iflag);
			lpfc_nvmet_defer_release(phba, ctxp);
			spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
			lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		}
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

	/*
	 * Done with the context: return it to the free list selected by
	 * the current CPU and the MRQ index the IO was received on
	 * (ctxp->idx).
	 */
	spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
	list_del_init(&ctxp->list);
	spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
	cpu = raw_smp_processor_id();
	infop = lpfc_get_ctx_list(phba, cpu, ctxp->idx);
	spin_lock_irqsave(&infop->nvmet_ctx_list_lock, iflag);
	list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
	infop->nvmet_ctx_list_cnt++;
	spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, iflag);
#endif
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
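/*
 * Accumulate per-segment latency statistics for a completed target I/O.
 * Every timestamp must be present and the set must be monotonically
 * ordered; a missing or out-of-order value (e.g. from a reused context)
 * causes the whole sample to be dropped.
 */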
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_async_xchg_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	uint64_t segsum;

	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_status_nvme < ctxp->ts_isr_cmd)
		return;
	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 *             to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 *             off to NVME Layer to Driver receives a Command op
	 *             from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 *             from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 *             to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 *             Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 *             Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 *             NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 *             FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 *             TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 *              MSI-X ISR to command is completed on wire.
	 *              (Segments 1 thru 8) for READDATA / WRITEDATA
	 *              (Segments 1 thru 4) for READDATA_RSP
	 */
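	/*
	 * Each segment below is first computed as an offset from the initial
	 * command ISR timestamp and then reduced by the running sum of the
	 * earlier segments, leaving just the per-stage delta; if a running
	 * sum ever exceeds the next offset the sample is discarded.
	 */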
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	segsum = seg1;

	seg2 = ctxp->ts_nvme_data - ctxp->ts_isr_cmd;
	if (segsum > seg2)
		return;
	seg2 -= segsum;
	segsum += seg2;

	seg3 = ctxp->ts_data_wqput - ctxp->ts_isr_cmd;
	if (segsum > seg3)
		return;
	seg3 -= segsum;
	segsum += seg3;

	seg4 = ctxp->ts_isr_data - ctxp->ts_isr_cmd;
	if (segsum > seg4)
		return;
	seg4 -= segsum;
	segsum += seg4;

	seg5 = ctxp->ts_data_nvme - ctxp->ts_isr_cmd;
	if (segsum > seg5)
		return;
	seg5 -= segsum;
	segsum += seg5;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = ctxp->ts_nvme_status - ctxp->ts_isr_cmd;
		if (segsum > seg6)
			return;
		seg6 -= segsum;
		segsum += seg6;

		seg7 = ctxp->ts_status_wqput - ctxp->ts_isr_cmd;
		if (segsum > seg7)
			return;
		seg7 -= segsum;
		segsum += seg7;

		seg8 = ctxp->ts_isr_status - ctxp->ts_isr_cmd;
		if (segsum > seg8)
			return;
		seg8 -= segsum;
		segsum += seg8;

		seg9 = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
		if (segsum > seg9)
			return;
		seg9 -= segsum;
		segsum += seg9;

		if (ctxp->ts_isr_status < ctxp->ts_isr_cmd)
			return;
		seg10 = (ctxp->ts_isr_status -
			 ctxp->ts_isr_cmd);
	} else {
		if (ctxp->ts_isr_data < ctxp->ts_isr_cmd)
			return;
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}

	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;

	phba->ktime_data_samples++;
	if (!seg6)
		goto out;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;
out:
	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_async_xchg_ctx *ctxp;
	uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	int id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVME_IO_INP;

	rsp = &ctxp->hdlrctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);

	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp) {
			atomic_inc(&tgtp->xmt_fcp_rsp_error);
			if (result == IOERR_ABORT_REQUESTED)
				atomic_inc(&tgtp->xmt_fcp_rsp_aborted);
		}

		logerr = LOG_NVME_IOERR;

		/* pick up SLI4 exchange busy condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) ctxp->flag |= LPFC_NVME_XBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) logerr |= LOG_NVME_ABTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) if (tgtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) atomic_inc(&tgtp->xmt_fcp_rsp_xb_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) ctxp->flag &= ~LPFC_NVME_XBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) lpfc_printf_log(phba, KERN_INFO, logerr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) "6315 IO Error Cmpl oxid: x%x xri: x%x %x/%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) "XBUSY:x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) ctxp->oxid, ctxp->ctxbuf->sglq->sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) status, result, ctxp->flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) rsp->fcp_error = NVME_SC_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) if (op == NVMET_FCOP_RSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) rsp->transferred_length = rsp->rsplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) rsp->transferred_length = rsp->transfer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) if (tgtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
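/*
 * READDATA_RSP and RSP are the last operation on the exchange, so
 * mark the context DONE and let lpfc_nvmet_xmt_fcp_release() recycle
 * it. For intermediate data operations the same iocbq is reused for
 * the next WQE, hence the partial memset below.
 */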
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if ((op == NVMET_FCOP_READDATA_RSP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) (op == NVMET_FCOP_RSP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) ctxp->state = LPFC_NVME_STE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) ctxp->entry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) if (ctxp->ts_cmd_nvme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (rsp->op == NVMET_FCOP_READDATA_RSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) ctxp->ts_isr_data =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) cmdwqe->isr_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) ctxp->ts_data_nvme =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) ctxp->ts_nvme_status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) ctxp->ts_data_nvme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) ctxp->ts_status_wqput =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) ctxp->ts_data_nvme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) ctxp->ts_isr_status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) ctxp->ts_data_nvme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) ctxp->ts_status_nvme =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) ctxp->ts_data_nvme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) ctxp->ts_isr_status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) cmdwqe->isr_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) ctxp->ts_status_nvme =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) rsp->done(rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (ctxp->ts_cmd_nvme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) lpfc_nvmet_ktime(phba, ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* lpfc_nvmet_xmt_fcp_release() will recycle the context */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) ctxp->entry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) start_clean = offsetof(struct lpfc_iocbq, iocb_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) memset(((char *)cmdwqe) + start_clean, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) (sizeof(struct lpfc_iocbq) - start_clean));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (ctxp->ts_cmd_nvme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) ctxp->ts_isr_data = cmdwqe->isr_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) ctxp->ts_data_nvme = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) rsp->done(rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) id = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) if (ctxp->cpu != id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) "6704 CPU Check cmdcmpl: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) "cpu %d expect %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) id, ctxp->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * __lpfc_nvme_xmt_ls_rsp - Generic service routine to transmit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * an NVME LS rsp for a prior NVME LS request that was received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) * @axchg: pointer to exchange context for the NVME LS request the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * is for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * @ls_rsp: pointer to the transport LS RSP that is to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) * @xmt_ls_rsp_cmp: completion routine to call upon RSP transmit done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) * This routine is used to format and send a WQE to transmit an NVME LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * Response. The response is for a prior NVME LS request that was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * received and posted to the transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * 0 : if response successfully transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * non-zero : if response failed to transmit, of the form -Exxx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) struct nvmefc_ls_rsp *ls_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) struct lpfc_iocbq *cmdwqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) struct lpfc_wcqe_complete *wcqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) struct lpfc_hba *phba = axchg->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) struct hbq_dmabuf *nvmebuf = (struct hbq_dmabuf *)axchg->rqb_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) struct lpfc_iocbq *nvmewqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) struct lpfc_dmabuf dmabuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct ulp_bde64 bpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) if (phba->pport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) "6023 NVMEx LS rsp oxid x%x\n", axchg->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (axchg->state != LPFC_NVME_STE_LS_RCV || axchg->entry_cnt != 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) "6412 NVMEx LS rsp state mismatch "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) "oxid x%x: %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) axchg->oxid, axchg->state, axchg->entry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) axchg->state = LPFC_NVME_STE_LS_RSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) axchg->entry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890)
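/*
 * Build the XMIT_SEQUENCE WQE that carries the LS response payload;
 * ls_rsp->rspdma is assumed to have been DMA-mapped already by the
 * transport before this routine is called.
 */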
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, axchg, ls_rsp->rspdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) ls_rsp->rsplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) if (nvmewqeq == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) "6150 NVMEx LS Drop Rsp x%x: Prep\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) axchg->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) goto out_free_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /* Save numBdes for bpl2sgl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) nvmewqeq->rsvd2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) nvmewqeq->hba_wqidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) nvmewqeq->context3 = &dmabuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) dmabuf.virt = &bpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) bpl.tus.f.bdeSize = ls_rsp->rsplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) bpl.tus.f.bdeFlags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) bpl.tus.w = le32_to_cpu(bpl.tus.w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * Note: although we're using stack space for the dmabuf, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * call to lpfc_sli4_issue_wqe is synchronous, so it will not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * be referenced after it returns to this routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) nvmewqeq->wqe_cmpl = xmt_ls_rsp_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) nvmewqeq->iocb_cmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) nvmewqeq->context2 = axchg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) lpfc_nvmeio_data(phba, "NVMEx LS RSP: xri x%x wqidx x%x len x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) axchg->oxid, nvmewqeq->hba_wqidx, ls_rsp->rsplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) rc = lpfc_sli4_issue_wqe(phba, axchg->hdwq, nvmewqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) /* clear to be sure there's no reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) nvmewqeq->context3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) if (rc == WQE_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * Okay to repost buffer here, but wait till cmpl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * before freeing ctxp and iocbq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) lpfc_in_buf_free(phba, &nvmebuf->dbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) "6151 NVMEx LS RSP x%x: failed to transmit %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) axchg->oxid, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) lpfc_nlp_put(nvmewqeq->context1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) out_free_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) /* Give back resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) lpfc_in_buf_free(phba, &nvmebuf->dbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) * As the transport doesn't track completions of responses, if the rsp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * fails to send, the transport will effectively ignore the rsp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * and consider the LS done. However, the driver has an active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) * exchange open for the LS - so be sure to abort the exchange
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) * if the response isn't sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) lpfc_nvme_unsol_ls_issue_abort(phba, axchg, axchg->sid, axchg->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) * lpfc_nvmet_xmt_ls_rsp - Transmit NVME LS response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * @tgtport: pointer to target port that the NVME LS is to be transmitted from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * @ls_rsp: pointer to the transport LS RSP that is to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * Driver registers this routine to transmit responses for received NVME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * LS requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * This routine is used to format and send a WQE to transmit an NVME LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * Response. The ls_rsp is used to reverse-map the LS to the original
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * NVME LS request sequence, which provides addressing information for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) * the remote port the LS is to be sent to, as well as the exchange id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * that the LS is bound to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * 0 : if response successfully transmitted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) * non-zero : if response failed to transmit, of the form -Exxx.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) struct nvmefc_ls_rsp *ls_rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) struct lpfc_async_xchg_ctx *axchg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) if (axchg->phba->pport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, lpfc_nvmet_xmt_ls_rsp_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) atomic_inc(&nvmep->xmt_ls_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * unless the failure is due to having already sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * the response, an abort will be generated for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * exchange if the rsp can't be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) if (rc != -EALREADY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) atomic_inc(&nvmep->xmt_ls_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) atomic_inc(&nvmep->xmt_ls_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
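/*
 * lpfc_nvmet_xmt_fcp_op - issue a data/response operation for an FCP
 * exchange. Called by the nvmet_fc transport for each READDATA,
 * WRITEDATA, READDATA_RSP or RSP operation; builds the WQE, posts it
 * on the selected hardware queue, and defers to the WQ-full list when
 * the queue is busy.
 */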
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) struct nvmefc_tgt_fcp_req *rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) struct lpfc_async_xchg_ctx *ctxp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct lpfc_hba *phba = ctxp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) struct lpfc_queue *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) struct lpfc_iocbq *nvmewqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) if (phba->pport->load_flag & FC_UNLOADING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) goto aerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (ctxp->ts_cmd_nvme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) if (rsp->op == NVMET_FCOP_RSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) ctxp->ts_nvme_status = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) ctxp->ts_nvme_data = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /* Setup the hdw queue if not already set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) if (!ctxp->hdwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) id = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) if (rsp->hwqid != id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) "6705 CPU Check OP: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) "cpu %d expect %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) id, rsp->hwqid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) ctxp->cpu = id; /* Setup cpu for cmpl check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) /* Sanity check: reject the op if the exchange is already being aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) if ((ctxp->flag & LPFC_NVME_ABTS_RCV) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) (ctxp->state == LPFC_NVME_STE_ABORT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) "6102 IO oxid x%x aborted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) goto aerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) if (nvmewqeq == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) "6152 FCP Drop IO x%x: Prep\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) goto aerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) nvmewqeq->iocb_cmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) nvmewqeq->context2 = ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) ctxp->wqeq->hba_wqidx = rsp->hwqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) ctxp->oxid, rsp->op, rsp->rsplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
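/* Mark the IO outstanding before the WQE is posted; the completion
 * handler (lpfc_nvmet_xmt_fcp_op_cmp) clears LPFC_NVME_IO_INP.
 */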
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) ctxp->flag |= LPFC_NVME_IO_INP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) if (rc == WQE_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) if (!ctxp->ts_cmd_nvme)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (rsp->op == NVMET_FCOP_RSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) ctxp->ts_status_wqput = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) ctxp->ts_data_wqput = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) if (rc == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) * WQ was full, so queue nvmewqeq to be sent after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * WQE release CQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) ctxp->flag |= LPFC_NVME_DEFER_WQFULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) wq = ctxp->hdwq->io_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) pring = wq->pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) spin_lock_irqsave(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) wq->q_flag |= HBA_NVMET_WQFULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) spin_unlock_irqrestore(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) atomic_inc(&lpfc_nvmep->defer_wqfull);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) /* Give back resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) "6153 FCP Drop IO x%x: Issue: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) ctxp->oxid, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) ctxp->wqeq->hba_wqidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) nvmewqeq->context2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) nvmewqeq->context3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) aerr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct lpfc_nvmet_tgtport *tport = targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* release any threads waiting for the unreg to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (tport->phba->targetport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) complete(tport->tport_unreg_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) struct nvmefc_tgt_fcp_req *req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) struct lpfc_async_xchg_ctx *ctxp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) container_of(req, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) struct lpfc_hba *phba = ctxp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) struct lpfc_queue *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) if (phba->pport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) if (!ctxp->hdwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) ctxp->hdwq = &phba->sli4_hba.hdwq[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) "6103 NVMET Abort op: oxid x%x flg x%x ste %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ctxp->oxid, ctxp->flag, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ctxp->oxid, ctxp->flag, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164)
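/*
 * ctxlock serializes the abort decision against the completion and
 * release paths, which also test and update ctxp->flag.
 */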
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) /* Since iaab/iaar are NOT set, we need to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) * if the firmware is in the process of aborting this IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) if (ctxp->flag & (LPFC_NVME_XBUSY | LPFC_NVME_ABORT_OP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ctxp->flag |= LPFC_NVME_ABORT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
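/*
 * If the WQE is still parked on the WQ-full list it never reached
 * the hardware: abort the unsolicited exchange and flush the entry
 * from the deferred list.
 */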
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) if (ctxp->flag & LPFC_NVME_DEFER_WQFULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) wq = ctxp->hdwq->io_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) /* A state of LPFC_NVME_STE_RCV means we have just received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * the NVME command and have not started processing it yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) * (no IO WQEs have been issued on this exchange).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) if (ctxp->state == LPFC_NVME_STE_RCV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
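/*
 * lpfc_nvmet_xmt_fcp_release - transport is finished with the exchange.
 * If an abort is still outstanding (ABORT_OP or XBUSY set), the real
 * release is deferred to the abort completion path; otherwise the
 * context buffer is recycled immediately.
 */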
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) struct nvmefc_tgt_fcp_req *rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) struct lpfc_async_xchg_ctx *ctxp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) struct lpfc_hba *phba = ctxp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) bool aborting = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (ctxp->flag & LPFC_NVME_XBUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) "6027 NVMET release with XBUSY flag x%x"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) " oxid x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) ctxp->flag, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) else if (ctxp->state != LPFC_NVME_STE_DONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) ctxp->state != LPFC_NVME_STE_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) "6413 NVMET release bad state %d %d oxid x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) ctxp->state, ctxp->entry_cnt, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if ((ctxp->flag & LPFC_NVME_ABORT_OP) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) (ctxp->flag & LPFC_NVME_XBUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) aborting = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /* let the abort path do the real release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) lpfc_nvmet_defer_release(phba, ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d abt %d\n", ctxp->oxid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ctxp->state, aborting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) atomic_inc(&lpfc_nvmep->xmt_fcp_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) ctxp->flag &= ~LPFC_NVME_TNOTIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) if (aborting)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
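/*
 * lpfc_nvmet_defer_rcv - the transport is done with an FCP command
 * that was previously deferred, so the RQ buffer the driver was
 * holding for it can now be freed back to the receive queue.
 */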
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct nvmefc_tgt_fcp_req *rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) struct lpfc_async_xchg_ctx *ctxp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) container_of(rsp, struct lpfc_async_xchg_ctx, hdlrctx.fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) struct lpfc_hba *phba = ctxp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) unsigned long iflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) ctxp->oxid, ctxp->size, raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!nvmebuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) "6425 Defer rcv: no buffer oxid x%x: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) "flg %x ste %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) ctxp->oxid, ctxp->flag, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) tgtp = phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (tgtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) atomic_inc(&tgtp->rcv_fcp_cmd_defer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) /* Free the nvmebuf since a new buffer already replaced it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) spin_lock_irqsave(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) ctxp->rqb_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) * lpfc_nvmet_ls_req_cmp - completion handler for an NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) * @phba: Pointer to HBA context object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * @cmdwqe: Pointer to driver command WQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * @wcqe: Pointer to driver response CQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) * This function is the completion handler for NVME LS requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) * The function updates any states and statistics, then calls the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) * generic completion handler to finish completion of the request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) lpfc_nvmet_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) struct lpfc_wcqe_complete *wcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) __lpfc_nvme_ls_req_cmp(phba, cmdwqe->vport, cmdwqe, wcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * lpfc_nvmet_ls_req - Issue a Link Service request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * @targetport: pointer to target instance registered with nvmet transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * Driver sets this value to the ndlp pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * Driver registers this routine to handle any link service request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * from the nvme_fc transport to a remote nvme-aware port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) * non-zero: various error codes, in form of -Exxx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) lpfc_nvmet_ls_req(struct nvmet_fc_target_port *targetport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) void *hosthandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) struct nvmefc_ls_req *pnvme_lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) u32 hstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) if (!lpfc_nvmet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) phba = lpfc_nvmet->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (phba->pport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)
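/*
 * LPFC_NVMET_INV_HOST_ACTIVE is presumed to mean the transport has
 * invalidated this host association; refuse new LS requests for it.
 */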
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) hstate = atomic_read(&lpfc_nvmet->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) if (hstate == LPFC_NVMET_INV_HOST_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) ndlp = (struct lpfc_nodelist *)hosthandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) ret = __lpfc_nvme_ls_req(phba->pport, ndlp, pnvme_lsreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) lpfc_nvmet_ls_req_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) * lpfc_nvmet_ls_abort - Abort a prior NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * @targetport: Transport targetport that the LS was issued from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * @hosthandle: hosthandle set by the driver in a prior ls_rqst_rcv.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) * Driver sets this value to the ndlp pointer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) * @pnvme_lsreq: the transport nvme_ls_req structure for the LS to be aborted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) * Driver registers this routine to abort an NVME LS request that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) * in progress (from the transport's perspective).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) lpfc_nvmet_ls_abort(struct nvmet_fc_target_port *targetport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) void *hosthandle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) struct nvmefc_ls_req *pnvme_lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct lpfc_nvmet_tgtport *lpfc_nvmet = targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) phba = lpfc_nvmet->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) if (phba->pport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) ndlp = (struct lpfc_nodelist *)hosthandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) ret = __lpfc_nvme_ls_abort(phba->pport, ndlp, pnvme_lsreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) atomic_inc(&lpfc_nvmet->xmt_ls_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
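/*
 * lpfc_nvmet_host_release - transport is done with the hosthandle
 * (the ndlp). Clearing the tgtport state is assumed to re-arm the
 * port for a future host association.
 */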
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) lpfc_nvmet_host_release(void *hosthandle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) struct lpfc_nodelist *ndlp = hosthandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) struct lpfc_hba *phba = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) phba = ndlp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) if (!phba->targetport || !phba->targetport->private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) "6202 NVMET XPT releasing hosthandle x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) hosthandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) atomic_set(&tgtp->state, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) lpfc_nvmet_discovery_event(struct nvmet_fc_target_port *tgtport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) uint32_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) tgtp = tgtport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) phba = tgtp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) rc = lpfc_issue_els_rscn(phba->pport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) "6420 NVMET subsystem change: Notification %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) (rc) ? "Failed" : "Sent");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
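/*
 * Target template handed to nvmet_fc_register_targetport(). The queue
 * and segment-count defaults below are conservative placeholders; lpfc
 * is expected to overwrite them from the HBA configuration before
 * registration (an assumption based on the cfg-derived limits used
 * elsewhere in the driver).
 */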
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) static struct nvmet_fc_target_template lpfc_tgttemplate = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) .targetport_delete = lpfc_nvmet_targetport_delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) .xmt_ls_rsp = lpfc_nvmet_xmt_ls_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) .fcp_op = lpfc_nvmet_xmt_fcp_op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) .fcp_abort = lpfc_nvmet_xmt_fcp_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) .fcp_req_release = lpfc_nvmet_xmt_fcp_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) .defer_rcv = lpfc_nvmet_defer_rcv,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) .discovery_event = lpfc_nvmet_discovery_event,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) .ls_req = lpfc_nvmet_ls_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) .ls_abort = lpfc_nvmet_ls_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) .host_release = lpfc_nvmet_host_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) .max_hw_queues = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) .dma_boundary = 0xFFFFFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) /* optional features */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) .target_features = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) /* sizes of additional private data for data structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) .lsrqst_priv_sz = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
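/*
 * Tear down every context buffer on one per-CPU/MRQ free list:
 * return the sglq to the nvmet sgl list, release the iocbq and free
 * the context memory.
 */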
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) __lpfc_nvmet_clean_io_for_cpu(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct lpfc_nvmet_ctx_info *infop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) spin_lock_irqsave(&infop->nvmet_ctx_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) list_for_each_entry_safe(ctx_buf, next_ctx_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) &infop->nvmet_ctx_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) list_del_init(&ctx_buf->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) __lpfc_clear_active_sglq(phba, ctx_buf->sglq->sli4_lxritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) ctx_buf->sglq->state = SGL_FREED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) ctx_buf->sglq->ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) spin_lock(&phba->sli4_hba.sgl_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) list_add_tail(&ctx_buf->sglq->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) &phba->sli4_hba.lpfc_nvmet_sgl_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) spin_unlock(&phba->sli4_hba.sgl_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) kfree(ctx_buf->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) spin_unlock_irqrestore(&infop->nvmet_ctx_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) struct lpfc_nvmet_ctx_info *infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /* The first context list, MRQ 0 CPU 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) infop = phba->sli4_hba.nvmet_ctx_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) if (!infop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) /* Cycle through the entire CPU context list for every MRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) for_each_present_cpu(j) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) infop = lpfc_get_ctx_list(phba, j, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) __lpfc_nvmet_clean_io_for_cpu(phba, infop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) kfree(phba->sli4_hba.nvmet_ctx_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) phba->sli4_hba.nvmet_ctx_info = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475)
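/*
 * Allocate an exchange context, iocbq and context buffer for every
 * NVMET XRI, then seed the per-CPU/MRQ free lists described in the
 * comment below.
 */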
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) struct lpfc_nvmet_ctxbuf *ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) struct lpfc_iocbq *nvmewqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) union lpfc_wqe128 *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) struct lpfc_nvmet_ctx_info *last_infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) struct lpfc_nvmet_ctx_info *infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) int i, j, idx, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) "6403 Allocate NVMET resources for %d XRIs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) phba->sli4_hba.nvmet_xri_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) phba->sli4_hba.nvmet_ctx_info = kcalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) phba->sli4_hba.num_possible_cpu * phba->cfg_nvmet_mrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) sizeof(struct lpfc_nvmet_ctx_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) if (!phba->sli4_hba.nvmet_ctx_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) "6419 Failed allocate memory for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) "nvmet context lists\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * Assuming X CPUs in the system, and Y MRQs, allocate some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * lpfc_nvmet_ctx_info structures as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * cpu0/mrq0 cpu1/mrq0 ... cpuX/mrq0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * cpu0/mrq1 cpu1/mrq1 ... cpuX/mrq1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * cpu0/mrqY cpu1/mrqY ... cpuX/mrqY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) * Each line represents an MRQ "silo" containing an entry for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) * every CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * MRQ X is initially assumed to be associated with CPU X, thus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) * contexts are initially distributed across all MRQs using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) * the MRQ index (N) as follows: cpuN/mrqN. When contexts are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * freed, they are freed to the MRQ silo based on the CPU number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * of the IO completion. Thus a context that was allocated for MRQ A
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * whose IO completed on CPU B will be freed to cpuB/mrqA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) for_each_possible_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) infop = lpfc_get_ctx_list(phba, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) INIT_LIST_HEAD(&infop->nvmet_ctx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) spin_lock_init(&infop->nvmet_ctx_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) infop->nvmet_ctx_list_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * Setup the next CPU context info ptr for each MRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * MRQ 0 will cycle thru CPUs 0 - X separately from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * MRQ 1 cycling thru CPUs 0 - X, and so on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) last_infop = lpfc_get_ctx_list(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) cpumask_first(cpu_present_mask),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) for (i = phba->sli4_hba.num_possible_cpu - 1; i >= 0; i--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) infop = lpfc_get_ctx_list(phba, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) infop->nvmet_ctx_next_cpu = last_infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) last_infop = infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }
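	/*
	 * At this point each MRQ's per-CPU entries form a ring ordered
	 * cpu0 -> cpu1 -> ... -> cpuX -> cpu0, which
	 * lpfc_nvmet_replenish_context() walks via nvmet_ctx_next_cpu
	 * whenever one silo runs dry.
	 */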
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) /* For all nvmet xris, allocate resources needed to process a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * received command on a per xri basis.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) */
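	/* Note: if any allocation below fails, contexts already placed on
	 * the lists are not freed here; the unwind is expected to happen
	 * via lpfc_nvmet_cleanup_io_context(), as on the registration
	 * failure path in lpfc_nvmet_create_targetport().
	 */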
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) cpu = cpumask_first(cpu_present_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (!ctx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) "6404 Ran out of memory for NVMET\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (!ctx_buf->context) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) kfree(ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) "6405 Ran out of NVMET "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) "context memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) ctx_buf->context->ctxbuf = ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) ctx_buf->context->state = LPFC_NVME_STE_FREE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (!ctx_buf->iocbq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) kfree(ctx_buf->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) kfree(ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) "6406 Ran out of NVMET iocb/WQEs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) nvmewqe = ctx_buf->iocbq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) wqe = &nvmewqe->wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) /* Initialize WQE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) memset(wqe, 0, sizeof(union lpfc_wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) ctx_buf->iocbq->context1 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) spin_lock(&phba->sli4_hba.sgl_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) spin_unlock(&phba->sli4_hba.sgl_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (!ctx_buf->sglq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) kfree(ctx_buf->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) kfree(ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) "6407 Ran out of NVMET XRIs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) INIT_WORK(&ctx_buf->defer_work, lpfc_nvmet_fcp_rqst_defer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * Add ctx to MRQidx context list. Our initial assumption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * is MRQidx will be associated with CPUidx. This association
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * can change on the fly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) infop = lpfc_get_ctx_list(phba, cpu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) spin_lock(&infop->nvmet_ctx_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) list_add_tail(&ctx_buf->list, &infop->nvmet_ctx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) infop->nvmet_ctx_list_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) spin_unlock(&infop->nvmet_ctx_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) /* Spread ctx structures evenly across all MRQs */
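		/*
		 * Worked example (assuming 2 MRQs and at least two present
		 * CPUs): ctx 0 lands on cpu0/mrq0, ctx 1 on cpu1/mrq1,
		 * ctx 2 back on cpu0/mrq0, and so on -- the cpuN/mrqN
		 * diagonal described earlier, since idx and cpu advance in
		 * lock step and wrap together below.
		 */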
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (idx >= phba->cfg_nvmet_mrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) cpu = cpumask_first(cpu_present_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) cpu = cpumask_next(cpu, cpu_present_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) if (cpu == nr_cpu_ids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) cpu = cpumask_first(cpu_present_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) for_each_present_cpu(i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) infop = lpfc_get_ctx_list(phba, i, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) "6408 TOTAL NVMET ctx for CPU %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) "MRQ %d: cnt %d nextcpu x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) i, j, infop->nvmet_ctx_list_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) infop->nvmet_ctx_next_cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) struct lpfc_vport *vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) struct nvmet_fc_port_info pinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) if (phba->targetport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) error = lpfc_nvmet_setup_io_context(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) pinfo.port_id = vport->fc_myDID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
	/* We need to tell the transport layer + 1 because it takes page
	 * alignment into account. When space for the SGL is allocated we
	 * allocate + 3: one for the cmd, one for the rsp, and one for
	 * this alignment.
	 */
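	/* Illustrative numbers (not taken from the source): with
	 * cfg_nvme_seg_cnt = 64 the transport is advertised
	 * max_sgl_segments = 65, while the driver-side SGL allocation
	 * would size the exchange for 64 + 3 entries.
	 */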
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) lpfc_tgttemplate.max_hw_queues = phba->cfg_hdw_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) &phba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) &phba->targetport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) error = -ENOENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) "6025 Cannot register NVME targetport x%x: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) "portnm %llx nodenm %llx segs %d qs %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) pinfo.port_name, pinfo.node_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) lpfc_tgttemplate.max_sgl_segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) lpfc_tgttemplate.max_hw_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) phba->targetport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) phba->nvmet_support = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) lpfc_nvmet_cleanup_io_context(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) tgtp = (struct lpfc_nvmet_tgtport *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) tgtp->phba = phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) "6026 Registered NVME "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) "targetport: x%px, private x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) "portnm %llx nodenm %llx segs %d qs %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) phba->targetport, tgtp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) pinfo.port_name, pinfo.node_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) lpfc_tgttemplate.max_sgl_segments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) lpfc_tgttemplate.max_hw_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) atomic_set(&tgtp->rcv_ls_req_in, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) atomic_set(&tgtp->rcv_ls_req_out, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) atomic_set(&tgtp->rcv_ls_req_drop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) atomic_set(&tgtp->xmt_ls_abort, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) atomic_set(&tgtp->xmt_ls_rsp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) atomic_set(&tgtp->xmt_ls_drop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) atomic_set(&tgtp->xmt_ls_rsp_error, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) atomic_set(&tgtp->xmt_ls_rsp_xb_set, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) atomic_set(&tgtp->xmt_ls_rsp_aborted, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) atomic_set(&tgtp->xmt_fcp_drop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) atomic_set(&tgtp->xmt_fcp_read, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) atomic_set(&tgtp->xmt_fcp_write, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) atomic_set(&tgtp->xmt_fcp_rsp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) atomic_set(&tgtp->xmt_fcp_release, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) atomic_set(&tgtp->xmt_fcp_rsp_xb_set, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) atomic_set(&tgtp->xmt_fcp_rsp_aborted, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) atomic_set(&tgtp->xmt_fcp_xri_abort_cqe, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) atomic_set(&tgtp->xmt_fcp_abort, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) atomic_set(&tgtp->xmt_abort_unsol, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) atomic_set(&tgtp->xmt_abort_sol, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) atomic_set(&tgtp->xmt_abort_rsp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) atomic_set(&tgtp->xmt_abort_rsp_error, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) atomic_set(&tgtp->defer_ctx, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) atomic_set(&tgtp->defer_fod, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) atomic_set(&tgtp->defer_wqfull, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) struct lpfc_vport *vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (!phba->targetport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) "6007 Update NVMET port x%px did x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) phba->targetport, vport->fc_myDID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) phba->targetport->port_id = vport->fc_myDID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * @axri: pointer to the nvmet xri abort wcqe structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * This routine is invoked by the worker thread to process a SLI4 fast-path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * NVMET aborted xri.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct sli4_wcqe_xri_aborted *axri)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) struct nvmefc_tgt_fcp_req *req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) unsigned long iflag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) int rrq_empty = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) bool released = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) "6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (phba->targetport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) atomic_inc(&tgtp->xmt_fcp_xri_abort_cqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) spin_lock_irqsave(&phba->hbalock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) list_for_each_entry_safe(ctxp, next_ctxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) spin_lock(&ctxp->ctxlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) /* Check if we already received a free context call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * and we have completed processing an abort situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (ctxp->flag & LPFC_NVME_CTX_RLS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) !(ctxp->flag & LPFC_NVME_ABORT_OP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) list_del_init(&ctxp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) released = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) ctxp->flag &= ~LPFC_NVME_XBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) spin_unlock(&ctxp->ctxlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) rrq_empty = list_empty(&phba->active_rrq_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) spin_unlock_irqrestore(&phba->hbalock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) lpfc_set_rrq_active(phba, ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) ctxp->ctxbuf->sglq->sli4_lxritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) rxid, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) lpfc_sli4_abts_err_handler(phba, ndlp, axri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) "6318 XB aborted oxid x%x flg x%x (%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) ctxp->oxid, ctxp->flag, released);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (released)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) if (rrq_empty)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) lpfc_worker_wake_up(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) spin_unlock_irqrestore(&phba->hbalock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) ctxp = lpfc_nvmet_get_ctx_for_xri(phba, xri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) if (ctxp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) * Abort already done by FW, so BA_ACC sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * However, the transport may be unaware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) "6323 NVMET Rcv ABTS xri x%x ctxp state x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) "flag x%x oxid x%x rxid x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) xri, ctxp->state, ctxp->flag, ctxp->oxid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) rxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) spin_lock_irqsave(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) ctxp->flag |= LPFC_NVME_ABTS_RCV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) ctxp->state = LPFC_NVME_STE_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) lpfc_nvmeio_data(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) xri, raw_smp_processor_id(), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
		/* The fcp_req is embedded in the ctxp, so the pointer
		 * can never be NULL; notify the transport directly.
		 */
		req = &ctxp->hdlrctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) struct fc_frame_header *fc_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct lpfc_async_xchg_ctx *ctxp, *next_ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) struct nvmefc_tgt_fcp_req *rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) uint32_t sid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) uint16_t oxid, xri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) unsigned long iflag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) sid = sli4_sid_from_fc_hdr(fc_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) oxid = be16_to_cpu(fc_hdr->fh_ox_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)
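	/*
	 * Match the incoming ABTS in three stages: first against contexts
	 * already on the abts list, then against commands still waiting
	 * for a free context, and finally against active exchanges; only
	 * if all three miss is the ABTS rejected with a BA_RJT.
	 */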
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) spin_lock_irqsave(&phba->hbalock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) list_for_each_entry_safe(ctxp, next_ctxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) if (ctxp->oxid != oxid || ctxp->sid != sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) xri = ctxp->ctxbuf->sglq->sli4_xritag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) spin_unlock_irqrestore(&phba->hbalock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) spin_lock_irqsave(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) ctxp->flag |= LPFC_NVME_ABTS_RCV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) lpfc_nvmeio_data(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) xri, raw_smp_processor_id(), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) "6319 NVMET Rcv ABTS:acc xri x%x\n", xri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) rsp = &ctxp->hdlrctx.fcp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) /* Respond with BA_ACC accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) spin_unlock_irqrestore(&phba->hbalock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) /* check the wait list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (phba->sli4_hba.nvmet_io_wait_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) struct rqb_dmabuf *nvmebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) struct fc_frame_header *fc_hdr_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) u32 sid_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) u16 oxid_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) /* match by oxid and s_id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) list_for_each_entry(nvmebuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) &phba->sli4_hba.lpfc_nvmet_io_wait_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) hbuf.list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) fc_hdr_tmp = (struct fc_frame_header *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) (nvmebuf->hbuf.virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) oxid_tmp = be16_to_cpu(fc_hdr_tmp->fh_ox_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) sid_tmp = sli4_sid_from_fc_hdr(fc_hdr_tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (oxid_tmp != oxid || sid_tmp != sid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) "6321 NVMET Rcv ABTS oxid x%x from x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) "is waiting for a ctxp\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) oxid, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) list_del_init(&nvmebuf->hbuf.list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) phba->sli4_hba.nvmet_io_wait_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) /* free buffer since already posted a new DMA buffer to RQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) /* Respond with BA_ACC accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) /* check active list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) ctxp = lpfc_nvmet_get_ctx_for_oxid(phba, oxid, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) if (ctxp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) xri = ctxp->ctxbuf->sglq->sli4_xritag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) spin_lock_irqsave(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) ctxp->flag |= (LPFC_NVME_ABTS_RCV | LPFC_NVME_ABORT_OP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) lpfc_nvmeio_data(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) xri, raw_smp_processor_id(), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) "6322 NVMET Rcv ABTS:acc oxid x%x xri x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) "flag x%x state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) ctxp->oxid, xri, ctxp->flag, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) if (ctxp->flag & LPFC_NVME_TNOTIFY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /* Notify the transport */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) nvmet_fc_rcv_fcp_abort(phba->targetport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) &ctxp->hdlrctx.fcp_req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) cancel_work_sync(&ctxp->ctxbuf->defer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) spin_lock_irqsave(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) lpfc_nvmet_defer_release(phba, ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) lpfc_nvmeio_data(phba, "NVMET ABTS RCV: oxid x%x CPU %02x rjt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) oxid, raw_smp_processor_id(), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) "6320 NVMET Rcv ABTS:rjt oxid x%x\n", oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) /* Respond with BA_RJT accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) struct lpfc_async_xchg_ctx *ctxp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) struct lpfc_iocbq *nvmewqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) struct lpfc_iocbq *next_nvmewqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct lpfc_wcqe_complete wcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) struct lpfc_wcqe_complete *wcqep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) pring = wq->pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) wcqep = &wcqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) /* Fake an ABORT error code back to cmpl routine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) memset(wcqep, 0, sizeof(struct lpfc_wcqe_complete));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) bf_set(lpfc_wcqe_c_status, wcqep, IOSTAT_LOCAL_REJECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) wcqep->parameter = IOERR_ABORT_REQUESTED;
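	/*
	 * Every WQE flushed below is completed through the normal
	 * lpfc_nvmet_xmt_fcp_op_cmp() path with this stack-local WCQE,
	 * so the IO is torn down exactly as if the firmware had reported
	 * a local-reject/abort completion.
	 */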
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) spin_lock_irqsave(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) list_for_each_entry_safe(nvmewqeq, next_nvmewqeq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) &wq->wqfull_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) if (ctxp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) /* Checking for a specific IO to flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) if (nvmewqeq->context2 == ctxp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) list_del(&nvmewqeq->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) spin_unlock_irqrestore(&pring->ring_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) wcqep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /* Flush all IOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) list_del(&nvmewqeq->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) spin_unlock_irqrestore(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) lpfc_nvmet_xmt_fcp_op_cmp(phba, nvmewqeq, wcqep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) spin_lock_irqsave(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (!ctxp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) wq->q_flag &= ~HBA_NVMET_WQFULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) spin_unlock_irqrestore(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) struct lpfc_queue *wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) struct lpfc_iocbq *nvmewqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct lpfc_async_xchg_ctx *ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) * Some WQE slots are available, so try to re-issue anything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) * on the WQ wqfull_list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) pring = wq->pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) spin_lock_irqsave(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) while (!list_empty(&wq->wqfull_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) spin_unlock_irqrestore(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) ctxp = (struct lpfc_async_xchg_ctx *)nvmewqeq->context2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) spin_lock_irqsave(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) if (rc == -EBUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) /* WQ was full again, so put it back on the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) list_add(&nvmewqeq->list, &wq->wqfull_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) spin_unlock_irqrestore(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) if (rc == WQE_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) if (ctxp->ts_cmd_nvme) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) if (ctxp->hdlrctx.fcp_req.op == NVMET_FCOP_RSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) ctxp->ts_status_wqput = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) ctxp->ts_data_wqput = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) WARN_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) wq->q_flag &= ~HBA_NVMET_WQFULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) spin_unlock_irqrestore(&pring->ring_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) struct lpfc_queue *wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) uint32_t qidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
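	/*
	 * Teardown: flush WQEs parked on each hardware queue's
	 * wqfull_list, then unregister the targetport. The transport's
	 * delete callback is expected to complete tport_unreg_cmp, so the
	 * timed wait below bounds how long we block on association
	 * teardown before reclaiming the IO contexts.
	 */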
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) if (phba->nvmet_support == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (phba->targetport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) wq = phba->sli4_hba.hdwq[qidx].io_wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) lpfc_nvmet_wqfull_flush(phba, wq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) tgtp->tport_unreg_cmp = &tport_unreg_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) nvmet_fc_unregister_targetport(phba->targetport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) if (!wait_for_completion_timeout(&tport_unreg_cmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) "6179 Unreg targetport x%px timeout "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) "reached.\n", phba->targetport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) lpfc_nvmet_cleanup_io_context(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) phba->targetport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) * lpfc_nvmet_handle_lsreq - Process an NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) * @axchg: pointer to exchange context for the NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) *
 * This routine is used for processing an asynchronously received NVME LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * request. Any remaining validation is done and the LS is then forwarded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * to the nvmet-fc transport via nvmet_fc_rcv_ls_req().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * The calling sequence should be: nvmet_fc_rcv_ls_req() -> (processing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) * -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done.
 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated axchg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) * Returns 0 if LS was handled and delivered to the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) * Returns 1 if LS failed to be handled and should be dropped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) lpfc_nvmet_handle_lsreq(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) struct lpfc_async_xchg_ctx *axchg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) struct lpfc_nvmet_tgtport *tgtp = phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) uint32_t *payload = axchg->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) atomic_inc(&tgtp->rcv_ls_req_in);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) /*
	 * The driver passes the ndlp as the hosthandle argument, allowing
	 * the transport to generate LS requests for any associations
	 * that are created.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) rc = nvmet_fc_rcv_ls_req(phba->targetport, axchg->ndlp, &axchg->ls_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) axchg->payload, axchg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) "6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) "%08x %08x %08x\n", axchg->size, rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) *payload, *(payload+1), *(payload+2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) *(payload+3), *(payload+4), *(payload+5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) atomic_inc(&tgtp->rcv_ls_req_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) atomic_inc(&tgtp->rcv_ls_req_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) lpfc_nvmet_process_rcv_fcp_req(struct lpfc_nvmet_ctxbuf *ctx_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) struct lpfc_async_xchg_ctx *ctxp = ctx_buf->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) struct lpfc_hba *phba = ctxp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) struct rqb_dmabuf *nvmebuf = ctxp->rqb_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) uint32_t *payload, qno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) uint32_t rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) if (!nvmebuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) "6159 process_rcv_fcp_req, nvmebuf is NULL, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) "oxid: x%x flg: x%x state: x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) ctxp->oxid, ctxp->flag, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) spin_lock_irqsave(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) lpfc_nvmet_defer_release(phba, ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) if (ctxp->flag & LPFC_NVME_ABTS_RCV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) "6324 IO oxid x%x aborted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) payload = (uint32_t *)(nvmebuf->dbuf.virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) ctxp->flag |= LPFC_NVME_TNOTIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (ctxp->ts_isr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) ctxp->ts_cmd_nvme = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) #endif
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
	 * in the NVME command / FC header has been stored.
	 * A buffer has already been reposted for this IO, so just free
	 * the nvmebuf.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->hdlrctx.fcp_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) payload, ctxp->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /* Process FCP command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) atomic_inc(&tgtp->rcv_fcp_cmd_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) spin_lock_irqsave(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) if ((ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) (nvmebuf != ctxp->rqb_buffer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) ctxp->rqb_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) /* Processing of FCP command is deferred */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) if (rc == -EOVERFLOW) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) lpfc_nvmeio_data(phba, "NVMET RCV BUSY: xri x%x sz %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) "from %06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) ctxp->oxid, ctxp->size, ctxp->sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) atomic_inc(&tgtp->rcv_fcp_cmd_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) atomic_inc(&tgtp->defer_fod);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) spin_lock_irqsave(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) if (ctxp->flag & LPFC_NVME_CTX_REUSE_WQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) * Post a replacement DMA buffer to RQ and defer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) * freeing rcv buffer till .defer_rcv callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) qno = nvmebuf->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) lpfc_post_rq_buffer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) ctxp->flag &= ~LPFC_NVME_TNOTIFY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) atomic_inc(&tgtp->rcv_fcp_cmd_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) "2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) ctxp->oxid, rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) atomic_read(&tgtp->rcv_fcp_cmd_in),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) atomic_read(&tgtp->rcv_fcp_cmd_out),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) atomic_read(&tgtp->xmt_fcp_release));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) ctxp->oxid, ctxp->size, ctxp->sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) spin_lock_irqsave(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) lpfc_nvmet_defer_release(phba, ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) spin_unlock_irqrestore(&ctxp->ctxlock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
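/*
 * Work handler for deferred FCP command delivery: it simply re-presents
 * the saved command to lpfc_nvmet_process_rcv_fcp_req() from workqueue
 * (process) context, outside the receive interrupt path.
 */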
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) lpfc_nvmet_fcp_rqst_defer_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) struct lpfc_nvmet_ctxbuf *ctx_buf =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) container_of(work, struct lpfc_nvmet_ctxbuf, defer_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) static struct lpfc_nvmet_ctxbuf *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) lpfc_nvmet_replenish_context(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) struct lpfc_nvmet_ctx_info *current_infop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) struct lpfc_nvmet_ctxbuf *ctx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) struct lpfc_nvmet_ctx_info *get_infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299)
	/*
	 * The current_infop for the MRQ an NVME command IU was received
	 * on is empty. Our goal is to replenish this MRQ's context
	 * list from another CPU's list.
	 *
	 * First pick a context list to start searching.
	 * nvmet_ctx_start_cpu points at the list that had contexts
	 * available the last time this CPU needed replenishing, while
	 * nvmet_ctx_next_cpu is simply the next sequential CPU's list
	 * for this MRQ.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (current_infop->nvmet_ctx_start_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) get_infop = current_infop->nvmet_ctx_start_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) get_infop = current_infop->nvmet_ctx_next_cpu;
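	/*
	 * The per-CPU context lists for an MRQ are linked into a ring
	 * via nvmet_ctx_next_cpu, so the loop below visits each possible
	 * CPU's list at most once, skipping our own (known empty) list.
	 */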
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) for (i = 0; i < phba->sli4_hba.num_possible_cpu; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (get_infop == current_infop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) get_infop = get_infop->nvmet_ctx_next_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) spin_lock(&get_infop->nvmet_ctx_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) /* Just take the entire context list, if there are any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (get_infop->nvmet_ctx_list_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) list_splice_init(&get_infop->nvmet_ctx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) ¤t_infop->nvmet_ctx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) current_infop->nvmet_ctx_list_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) get_infop->nvmet_ctx_list_cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) get_infop->nvmet_ctx_list_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) spin_unlock(&get_infop->nvmet_ctx_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) current_infop->nvmet_ctx_start_cpu = get_infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) list_remove_head(¤t_infop->nvmet_ctx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) ctx_buf, struct lpfc_nvmet_ctxbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) return ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) /* Otherwise, move on to the next CPU for this MRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) spin_unlock(&get_infop->nvmet_ctx_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) get_infop = get_infop->nvmet_ctx_next_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) /* Nothing found, all contexts for the MRQ are in-flight */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) * @idx: relative index of MRQ vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * @isr_timestamp: in jiffies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) * @cqflag: cq processing information regarding workload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) *
 * This routine processes an unsolicited FCP command frame received on an
 * NVMET MRQ. It allocates an exchange context from the per-CPU context
 * list for the MRQ (replenishing it from another CPU's list if needed),
 * initializes the context from the frame header, and hands the command to
 * the NVMET transport either inline or via a deferred work item, depending
 * on @cqflag. If no context is available, the command is queued on the
 * lpfc_nvmet_io_wait_list and a replacement RQ buffer is posted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) uint32_t idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) struct rqb_dmabuf *nvmebuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) uint64_t isr_timestamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) uint8_t cqflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) struct lpfc_async_xchg_ctx *ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) struct fc_frame_header *fc_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) struct lpfc_nvmet_ctxbuf *ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) struct lpfc_nvmet_ctx_info *current_infop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) uint32_t size, oxid, sid, qno;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) unsigned long iflag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) int current_cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (!IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) ctx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) if (!nvmebuf || !phba->targetport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) "6157 NVMET FCP Drop IO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) if (nvmebuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390)
	/*
	 * Get a pointer to the context list for this MRQ based on
	 * the CPU this MRQ IRQ is associated with. If the CPU association
	 * has changed from our initial assumption, the local context list
	 * may be empty, in which case it is replenished with the context
	 * list from another CPU for this MRQ.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) current_cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) current_infop = lpfc_get_ctx_list(phba, current_cpu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) spin_lock_irqsave(¤t_infop->nvmet_ctx_list_lock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (current_infop->nvmet_ctx_list_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) list_remove_head(¤t_infop->nvmet_ctx_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) ctx_buf, struct lpfc_nvmet_ctxbuf, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) current_infop->nvmet_ctx_list_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) } else {
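		/*
		 * Local list is empty; try to take over the entire context
		 * list of another CPU for this MRQ. The other CPU's list
		 * lock is acquired nested inside the one held here.
		 */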
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) ctx_buf = lpfc_nvmet_replenish_context(phba, current_infop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) spin_unlock_irqrestore(¤t_infop->nvmet_ctx_list_lock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) oxid = be16_to_cpu(fc_hdr->fh_ox_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) size = nvmebuf->bytes_recv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) if (idx != current_cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) "6703 CPU Check rcv: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) "cpu %d expect %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) current_cpu, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) oxid, size, raw_smp_processor_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (!ctx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) /* Queue this NVME IO to process later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) list_add_tail(&nvmebuf->hbuf.list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) &phba->sli4_hba.lpfc_nvmet_io_wait_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) phba->sli4_hba.nvmet_io_wait_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) phba->sli4_hba.nvmet_io_wait_total++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) /* Post a brand new DMA buffer to RQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) qno = nvmebuf->idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) lpfc_post_rq_buffer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) atomic_inc(&tgtp->defer_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) sid = sli4_sid_from_fc_hdr(fc_hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) ctxp = (struct lpfc_async_xchg_ctx *)ctx_buf->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) spin_lock_irqsave(&phba->sli4_hba.t_active_list_lock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) list_add_tail(&ctxp->list, &phba->sli4_hba.t_active_ctx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) spin_unlock_irqrestore(&phba->sli4_hba.t_active_list_lock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) if (ctxp->state != LPFC_NVME_STE_FREE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) "6414 NVMET Context corrupt %d %d oxid x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) ctxp->state, ctxp->entry_cnt, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) ctxp->wqeq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) ctxp->offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) ctxp->phba = phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) ctxp->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) ctxp->oxid = oxid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) ctxp->sid = sid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) ctxp->idx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) ctxp->state = LPFC_NVME_STE_RCV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) ctxp->entry_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) ctxp->flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) ctxp->ctxbuf = ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) ctxp->rqb_buffer = (void *)nvmebuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) ctxp->hdwq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) spin_lock_init(&ctxp->ctxlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) if (isr_timestamp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) ctxp->ts_isr_cmd = isr_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) ctxp->ts_cmd_nvme = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) ctxp->ts_nvme_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) ctxp->ts_data_wqput = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) ctxp->ts_isr_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) ctxp->ts_data_nvme = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) ctxp->ts_nvme_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) ctxp->ts_status_wqput = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) ctxp->ts_isr_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) ctxp->ts_status_nvme = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/* Check CQ processing load: process inline unless the CQ was
	 * flagged busy when the command was received.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) if (!cqflag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) lpfc_nvmet_process_rcv_fcp_req(ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) if (!queue_work(phba->wq, &ctx_buf->defer_work)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) atomic_inc(&tgtp->rcv_fcp_cmd_drop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) "6325 Unable to queue work for oxid x%x. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) "FCP Drop IO [x%x x%x x%x]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) ctxp->oxid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) atomic_read(&tgtp->rcv_fcp_cmd_in),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) atomic_read(&tgtp->rcv_fcp_cmd_out),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) atomic_read(&tgtp->xmt_fcp_release));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) spin_lock_irqsave(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) lpfc_nvmet_defer_release(phba, ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) * @idx: relative index of MRQ vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * @nvmebuf: pointer to received nvme data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * @isr_timestamp: in jiffies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) * @cqflag: cq processing information regarding workload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) *
 * This routine is used to process an unsolicited event received from an SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from
 * the SLI RQ on which the unsolicited event was received.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) uint32_t idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) struct rqb_dmabuf *nvmebuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) uint64_t isr_timestamp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) uint8_t cqflag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) if (!nvmebuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) "3167 NVMET FCP Drop IO\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) if (phba->nvmet_support == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) lpfc_nvmet_unsol_fcp_buffer(phba, idx, nvmebuf, isr_timestamp, cqflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) }
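
/*
 * A sketch of the expected call path, assuming the SLI4 receive-queue CQE
 * handling in lpfc_sli.c (names below are illustrative of that caller):
 *
 *	lpfc_nvmet_unsol_fcp_event(phba, idx, dma_buf,
 *				   cq->isr_timestamp,
 *				   cq->q_flag & HBA_NVMET_CQ_NOTIFY);
 *
 * so @cqflag reflects how loaded the CQ was when the frame arrived.
 */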
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) /**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare an lpfc wqe data structure
 * @phba: pointer to lpfc hba data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA address of the NVME LS response payload.
 * @rspsize: size, in bytes, of the NVME LS response.
 *
 * This routine allocates an lpfc WQE data structure from the driver's
 * iocbq free list and prepares an XMIT_SEQUENCE64 WQE that transmits the
 * NVME LS response described by @rspbuf and @rspsize back to the
 * originator of the exchange. It looks up the ndlp for the exchange's
 * source ID, takes a reference on it, and stores that reference in
 * context1 of the WQE so the command's completion handler can access the
 * node later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) * Return code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) * Pointer to the newly allocated/prepared nvme wqe data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) * NULL - when nvme wqe data structure allocation/preparation failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static struct lpfc_iocbq *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) struct lpfc_async_xchg_ctx *ctxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) dma_addr_t rspbuf, uint16_t rspsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) struct lpfc_iocbq *nvmewqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) union lpfc_wqe128 *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) if (!lpfc_is_link_up(phba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) "6104 NVMET prep LS wqe: link err: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) "NPORT x%x oxid:x%x ste %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) ctxp->sid, ctxp->oxid, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) /* Allocate buffer for command wqe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) nvmewqe = lpfc_sli_get_iocbq(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) if (nvmewqe == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) "6105 NVMET prep LS wqe: No WQE: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) "NPORT x%x oxid x%x ste %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) ctxp->sid, ctxp->oxid, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) "6106 NVMET prep LS wqe: No ndlp: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) "NPORT x%x oxid x%x ste %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) ctxp->sid, ctxp->oxid, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) goto nvme_wqe_free_wqeq_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) ctxp->wqeq = nvmewqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) /* prevent preparing wqe with NULL ndlp reference */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) nvmewqe->context1 = lpfc_nlp_get(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) if (nvmewqe->context1 == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) goto nvme_wqe_free_wqeq_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) nvmewqe->context2 = ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) wqe = &nvmewqe->wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) memset(wqe, 0, sizeof(union lpfc_wqe));
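	/*
	 * Note: this clears only the base 64-byte WQE
	 * (sizeof(union lpfc_wqe)); the upper words of the 128-byte WQE
	 * are not referenced by the XMIT_SEQUENCE64 setup below, so the
	 * partial clear is presumed sufficient.
	 */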
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) /* Words 0 - 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) /* Word 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) /* Word 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) /* Word 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) /* Word 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) /* Word 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) CMD_XMIT_SEQUENCE64_WQE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) /* Word 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) /* Word 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) /* Needs to be set by caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) /* Word 10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) LPFC_WQE_LENLOC_WORD12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) /* Word 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) LPFC_WQE_CQ_ID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) OTHER_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) /* Word 12 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) wqe->xmit_sequence.xmit_len = rspsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) nvmewqe->retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) nvmewqe->vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) /* Xmit NVMET response to remote NPORT <did> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) "6039 Xmit NVMET LS response to remote "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) rspsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) return nvmewqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) nvme_wqe_free_wqeq_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) nvmewqe->context2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) nvmewqe->context3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) lpfc_sli_release_iocbq(phba, nvmewqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) static struct lpfc_iocbq *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) struct lpfc_async_xchg_ctx *ctxp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) struct nvmefc_tgt_fcp_req *rsp = &ctxp->hdlrctx.fcp_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) struct sli4_sge *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) struct lpfc_iocbq *nvmewqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct scatterlist *sgel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) union lpfc_wqe128 *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) struct ulp_bde64 *bde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) dma_addr_t physaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) int i, cnt, nsegs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) int do_pbde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) int xc = 1;
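	/* Assume exchange continuation (XC = 1): the XRI is already active
	 * from a prior WQE on this exchange. Cleared below when a fresh
	 * WQE/XRI is set up for this IO.
	 */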
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (!lpfc_is_link_up(phba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
				"6107 NVMET prep FCP wqe: link err: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) "NPORT x%x oxid x%x ste %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) ctxp->sid, ctxp->oxid, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) "6108 NVMET prep FCP wqe: no ndlp: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) "NPORT x%x oxid x%x ste %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) ctxp->sid, ctxp->oxid, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) if (rsp->sg_cnt > lpfc_tgttemplate.max_sgl_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) "6109 NVMET prep FCP wqe: seg cnt err: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) "NPORT x%x oxid x%x ste %d cnt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) ctxp->sid, ctxp->oxid, ctxp->state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) phba->cfg_nvme_seg_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) nsegs = rsp->sg_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) nvmewqe = ctxp->wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) if (nvmewqe == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) /* Allocate buffer for command wqe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) nvmewqe = ctxp->ctxbuf->iocbq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) if (nvmewqe == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) "6110 NVMET prep FCP wqe: No "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) "WQE: NPORT x%x oxid x%x ste %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) ctxp->sid, ctxp->oxid, ctxp->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) ctxp->wqeq = nvmewqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) xc = 0; /* create new XRI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) nvmewqe->sli4_lxritag = NO_XRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) nvmewqe->sli4_xritag = NO_XRI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) if (((ctxp->state == LPFC_NVME_STE_RCV) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) (ctxp->entry_cnt == 1)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) (ctxp->state == LPFC_NVME_STE_DATA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) wqe = &nvmewqe->wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) "6111 Wrong state NVMET FCP: %d cnt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) ctxp->state, ctxp->entry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) switch (rsp->op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) case NVMET_FCOP_READDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) case NVMET_FCOP_READDATA_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) /* From the tsend template, initialize words 7 - 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) memcpy(&wqe->words[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) &lpfc_tsend_cmd_template.words[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) sizeof(uint32_t) * 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) /* Words 0 - 2 : The first sg segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) sgel = &rsp->sg[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) physaddr = sg_dma_address(sgel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) wqe->fcp_tsend.bde.addrHigh =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) cpu_to_le32(putPaddrHigh(physaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) /* Word 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) wqe->fcp_tsend.payload_offset_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) /* Word 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) wqe->fcp_tsend.relative_offset = ctxp->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) /* Word 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) wqe->fcp_tsend.reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) /* Word 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) nvmewqe->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) /* Word 7 - set ar later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) /* Word 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) /* Word 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) /* Word 10 - set wqes later, in template xc=1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) /* Word 11 - set sup, irsp, irsplen later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) do_pbde = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) /* Word 12 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) /* Setup 2 SKIP SGEs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) sgl->addr_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) sgl->addr_lo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) sgl->word2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) sgl->sge_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) sgl->addr_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) sgl->addr_lo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) sgl->word2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) sgl->sge_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) if (rsp->op == NVMET_FCOP_READDATA_RSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) atomic_inc(&tgtp->xmt_fcp_read_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) if (ndlp->nlp_flag & NLP_SUPPRESS_RSP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) bf_set(wqe_sup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) &wqe->fcp_tsend.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) ((rsp->rsplen >> 2) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) memcpy(&wqe->words[16], rsp->rspaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) rsp->rsplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) atomic_inc(&tgtp->xmt_fcp_read);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) case NVMET_FCOP_WRITEDATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) /* From the treceive template, initialize words 3 - 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) memcpy(&wqe->words[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) &lpfc_treceive_cmd_template.words[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) sizeof(uint32_t) * 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) wqe->fcp_treceive.bde.tus.f.bdeSize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) wqe->fcp_treceive.bde.addrLow = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) wqe->fcp_treceive.bde.addrHigh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) /* Word 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) wqe->fcp_treceive.relative_offset = ctxp->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) /* Word 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) nvmewqe->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) /* Word 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) /* Word 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) /* Word 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) /* Word 10 - in template xc=1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) if (!xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) /* Word 11 - set pbde later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (phba->cfg_enable_pbde) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) do_pbde = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) do_pbde = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903)
		/* Word 12 - fcp_data_len sits at word 12 in both the tsend
		 * and treceive layouts of the WQE union, so setting it via
		 * fcp_tsend here writes the same location.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) /* Setup 2 SKIP SGEs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) sgl->addr_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) sgl->addr_lo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) sgl->word2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) sgl->sge_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) sgl->addr_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) sgl->addr_lo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) sgl->word2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) sgl->sge_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) atomic_inc(&tgtp->xmt_fcp_write);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) case NVMET_FCOP_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) /* From the treceive template, initialize words 4 - 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) memcpy(&wqe->words[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) &lpfc_trsp_cmd_template.words[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) sizeof(uint32_t) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) /* Words 0 - 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) physaddr = rsp->rspdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) wqe->fcp_trsp.bde.addrLow =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) cpu_to_le32(putPaddrLow(physaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) wqe->fcp_trsp.bde.addrHigh =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) cpu_to_le32(putPaddrHigh(physaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) /* Word 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) wqe->fcp_trsp.response_len = rsp->rsplen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) /* Word 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) nvmewqe->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) /* Word 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) /* Word 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) /* Word 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) /* Word 10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) if (xc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) /* Word 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) /* In template wqes=0 irsp=0 irsplen=0 - good response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) if (rsp->rsplen != LPFC_NVMET_SUCCESS_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) /* Bad response - embed it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) ((rsp->rsplen >> 2) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) do_pbde = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) /* Word 12 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) wqe->fcp_trsp.rsvd_12_15[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) /* Use rspbuf, NOT sg list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) nsegs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) sgl->word2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) atomic_inc(&tgtp->xmt_fcp_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) "6064 Unknown Rsp Op %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) rsp->op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) nvmewqe->retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) nvmewqe->vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) nvmewqe->context1 = ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)
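	/*
	 * Build one DATA SGE per scatterlist segment, following the two
	 * SKIP SGEs set up above. For the first segment, words 13-15 of
	 * the WQE optionally carry a PBDE copy of the SGE when do_pbde is
	 * set; otherwise those words are zeroed.
	 */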
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) for_each_sg(rsp->sg, sgel, nsegs, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) physaddr = sg_dma_address(sgel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) cnt = sg_dma_len(sgel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) sgl->addr_hi = putPaddrHigh(physaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) sgl->addr_lo = putPaddrLow(physaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) sgl->word2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i + 1) == rsp->sg_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) bf_set(lpfc_sli4_sge_last, sgl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) sgl->sge_len = cpu_to_le32(cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) bde = (struct ulp_bde64 *)&wqe->words[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) if (do_pbde) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) /* Words 13-15 (PBDE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) bde->addrLow = sgl->addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) bde->addrHigh = sgl->addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) bde->tus.f.bdeSize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) le32_to_cpu(sgl->sge_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) bde->tus.w = cpu_to_le32(bde->tus.w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) memset(bde, 0, sizeof(struct ulp_bde64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) ctxp->offset += cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) ctxp->state = LPFC_NVME_STE_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) ctxp->entry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) return nvmewqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) }
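
/*
 * Illustrative (assumed) usage by the FCP op handlers elsewhere in this
 * file; lpfc_nvmet_xmt_fcp_op_cmp and lpfc_sli4_issue_wqe are the presumed
 * completion handler and submit routine:
 *
 *	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
 *	if (!nvmewqeq)
 *		return -ENXIO;
 *	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
 *	rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
 *
 * i.e. a NULL return from this routine is treated as a hard transmit
 * failure for the op.
 */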
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) * @phba: Pointer to HBA context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) * @cmdwqe: Pointer to driver command WQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) * @wcqe: Pointer to driver response CQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for an NVME ABTS of an FCP
 * command and frees the memory resources used for that command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) struct lpfc_wcqe_complete *wcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) struct lpfc_async_xchg_ctx *ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) uint32_t result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) bool released = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) ctxp = cmdwqe->context2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) result = wcqe->parameter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) if (ctxp->flag & LPFC_NVME_ABORT_OP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) ctxp->state = LPFC_NVME_STE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) /* Check if we already received a free context call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) * and we have completed processing an abort situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) !(ctxp->flag & LPFC_NVME_XBUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) list_del_init(&ctxp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) released = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) ctxp->flag &= ~LPFC_NVME_ABORT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) atomic_inc(&tgtp->xmt_abort_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) "6165 ABORT cmpl: oxid x%x flg x%x (%d) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) "WCQE: %08x %08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) ctxp->oxid, ctxp->flag, released,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) wcqe->word0, wcqe->total_data_placed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) result, wcqe->word3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) cmdwqe->context2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) cmdwqe->context3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) * If the transport has released the ctx, it can be reused now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) * Otherwise, it will be recycled by the transport's release call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) if (released)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) /* This is the iocbq for the abort, not the command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) lpfc_sli_release_iocbq(phba, cmdwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) /* Since iaab/iaar are NOT set, there is no work left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) * should have been called already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) * @phba: Pointer to HBA context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) * @cmdwqe: Pointer to driver command WQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) * @wcqe: Pointer to driver response CQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * The function is called from the SLI ring event handler with no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) * lock held. It is the completion handler for an NVME ABTS for FCP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) * commands and frees the memory resources used for the NVME command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) struct lpfc_wcqe_complete *wcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) struct lpfc_async_xchg_ctx *ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) uint32_t result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) bool released = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) ctxp = cmdwqe->context2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) result = wcqe->parameter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) if (!ctxp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) /* if the context is cleared, the related I/O already completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) "6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) wcqe->word0, wcqe->total_data_placed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) result, wcqe->word3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) if (ctxp->flag & LPFC_NVME_ABORT_OP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) atomic_inc(&tgtp->xmt_fcp_abort_cmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (ctxp->state != LPFC_NVME_STE_ABORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) "6112 ABTS Wrong state:%d oxid x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) ctxp->state, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) }
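^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) /* Even on a state mismatch, fall through: the abort WQE has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)  * completed in hardware either way, so the context teardown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)  * below must still run.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)  */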
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) /* Check if we already received a free context call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) * and we have completed processing an abort situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) ctxp->state = LPFC_NVME_STE_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) if ((ctxp->flag & LPFC_NVME_CTX_RLS) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) !(ctxp->flag & LPFC_NVME_XBUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) list_del_init(&ctxp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) released = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) ctxp->flag &= ~LPFC_NVME_ABORT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) atomic_inc(&tgtp->xmt_abort_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) "6316 ABTS cmpl oxid x%x flg x%x (%x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) "WCQE: %08x %08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) ctxp->oxid, ctxp->flag, released,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) wcqe->word0, wcqe->total_data_placed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) result, wcqe->word3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) cmdwqe->context2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) cmdwqe->context3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) * If the transport has released the ctx, it can be reused now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) * Otherwise, it will be recycled by the transport's release call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) if (released)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) /* Since iaab/iaar are NOT set, there is no work left.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) * For LPFC_NVME_XBUSY, lpfc_sli4_nvmet_xri_aborted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) * should have been called already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) * @phba: Pointer to HBA context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) * @cmdwqe: Pointer to driver command WQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) * @wcqe: Pointer to driver response CQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) * The function is called from the SLI ring event handler with no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) * lock held. It is the completion handler for an NVME ABTS for LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) * commands and frees the memory resources used for the NVME command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) struct lpfc_wcqe_complete *wcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) struct lpfc_async_xchg_ctx *ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) uint32_t result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) ctxp = cmdwqe->context2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) result = wcqe->parameter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) if (phba->nvmet_support) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) atomic_inc(&tgtp->xmt_ls_abort_cmpl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) ctxp, wcqe->word0, wcqe->total_data_placed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) result, wcqe->word3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) if (!ctxp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) "6415 NVMET LS Abort No ctx: WCQE: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) "%08x %08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) wcqe->word0, wcqe->total_data_placed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) result, wcqe->word3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) lpfc_sli_release_iocbq(phba, cmdwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) if (ctxp->state != LPFC_NVME_STE_LS_ABORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) "6416 NVMET LS abort cmpl state mismatch: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) "oxid x%x: %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) ctxp->oxid, ctxp->state, ctxp->entry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) cmdwqe->context2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) cmdwqe->context3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) lpfc_sli_release_iocbq(phba, cmdwqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) kfree(ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) struct lpfc_async_xchg_ctx *ctxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) uint32_t sid, uint16_t xri)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) struct lpfc_nvmet_tgtport *tgtp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) struct lpfc_iocbq *abts_wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) union lpfc_wqe128 *wqe_abts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) "6067 ABTS: sid %x xri x%x/x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) sid, xri, ctxp->wqeq->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) if (phba->nvmet_support && phba->targetport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) ndlp = lpfc_findnode_did(phba->pport, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) if (tgtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) atomic_inc(&tgtp->xmt_abort_rsp_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) "6134 Drop ABTS - wrong NDLP state x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) /* Do not report a failure for an ABTS request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) abts_wqeq = ctxp->wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) wqe_abts = &abts_wqeq->wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) * Since we zero the whole WQE, we need to ensure we set the WQE fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) * that were initialized in lpfc_sli4_nvmet_alloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) memset(wqe_abts, 0, sizeof(union lpfc_wqe));
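^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) /* Note: only sizeof(union lpfc_wqe), the base WQE, is cleared; all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)  * of the XMIT_SEQUENCE fields set below live within that base area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272)  */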
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) /* Word 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) /* Word 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) abts_wqeq->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) /* Word 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) CMD_XMIT_SEQUENCE64_WQE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) /* Word 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) /* Word 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) /* The OXID of the exchange being aborted is supplied by the caller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) /* Word 10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) LPFC_WQE_LENLOC_WORD12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) /* Word 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) LPFC_WQE_CQ_ID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) OTHER_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) abts_wqeq->vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) abts_wqeq->context1 = ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) abts_wqeq->context2 = ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) abts_wqeq->context3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) abts_wqeq->rsvd2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) /* hba_wqidx should already be set up from the command we are aborting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) abts_wqeq->iocb.ulpLe = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) "6069 Issue ABTS to xri x%x reqtag x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) xri, abts_wqeq->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) struct lpfc_async_xchg_ctx *ctxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) uint32_t sid, uint16_t xri)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) struct lpfc_iocbq *abts_wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) u8 opt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) if (!ctxp->wqeq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) ctxp->wqeq = ctxp->ctxbuf->iocbq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) ctxp->wqeq->hba_wqidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) ndlp = lpfc_findnode_did(phba->pport, sid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) atomic_inc(&tgtp->xmt_abort_rsp_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) "6160 Drop ABORT - wrong NDLP state x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) /* Do not report a failure for an ABTS request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) ctxp->flag &= ~LPFC_NVME_ABORT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) /* Issue ABTS for this WQE based on iotag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) if (!ctxp->abort_wqeq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) atomic_inc(&tgtp->xmt_abort_rsp_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) "6161 ABORT failed: No wqeqs: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) "xri: x%x\n", ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) /* Do not report a failure for an ABTS request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) ctxp->flag &= ~LPFC_NVME_ABORT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) abts_wqeq = ctxp->abort_wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) ctxp->state = LPFC_NVME_STE_ABORT;
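^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) /* LPFC_NVME_ABTS_RCV means the initiator already sent an ABTS for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)  * this exchange; INHIBIT_ABORT then asks the abort WQE not to put
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)  * another ABTS on the wire.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)  */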
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) opt = (ctxp->flag & LPFC_NVME_ABTS_RCV) ? INHIBIT_ABORT : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) /* Log the abort request before entering the I/O submit path. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) "6162 ABORT Request to rport DID x%06x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) "for xri x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) /* If the HBA is being reset, this flag is set. It is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) * cleared when the reset is complete and the rings are reestablished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) spin_lock_irqsave(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) /* driver queued commands are in process of being flushed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) if (phba->hba_flag & HBA_IOQ_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) atomic_inc(&tgtp->xmt_abort_rsp_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) "6163 Driver in reset cleanup - flushing "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) "NVME Req now. hba_flag x%x oxid x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) phba->hba_flag, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) lpfc_sli_release_iocbq(phba, abts_wqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) ctxp->flag &= ~LPFC_NVME_ABORT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) /* Outstanding abort is in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) atomic_inc(&tgtp->xmt_abort_rsp_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) "6164 Outstanding NVME I/O Abort Request "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) "still pending on oxid x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) lpfc_sli_release_iocbq(phba, abts_wqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) ctxp->flag &= ~LPFC_NVME_ABORT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) /* Ready - mark the outstanding I/O as aborted by the driver. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) /* ABTS WQE must go to the same WQ as the WQE to be aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) abts_wqeq->iocb_cmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) abts_wqeq->iocb_flag |= LPFC_IO_NVME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) abts_wqeq->context2 = ctxp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) abts_wqeq->vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) if (!ctxp->hdwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) if (rc == WQE_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) atomic_inc(&tgtp->xmt_abort_sol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) atomic_inc(&tgtp->xmt_abort_rsp_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) ctxp->flag &= ~LPFC_NVME_ABORT_OP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) lpfc_sli_release_iocbq(phba, abts_wqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) "6166 Failed ABORT issue_wqe with status x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) "for oxid x%x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) rc, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) }
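^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  * Flow sketch (solicited FCP abort), summarizing the routine above: a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  * separate abort iocbq is allocated, lpfc_nvme_prep_abort_wqe() builds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  * an abort WQE against the xri of the outstanding I/O, and the WQE is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  * posted on the same WQ as the command being aborted. On completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  * lpfc_nvmet_sol_fcp_abort_cmp() releases the abort iocbq and, if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  * transport has already released the context, reposts the ctxbuf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)  */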
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) struct lpfc_async_xchg_ctx *ctxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) uint32_t sid, uint16_t xri)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) struct lpfc_iocbq *abts_wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) bool released = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) if (!ctxp->wqeq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) ctxp->wqeq = ctxp->ctxbuf->iocbq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) ctxp->wqeq->hba_wqidx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) if (ctxp->state == LPFC_NVME_STE_FREE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) "6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) ctxp->state, ctxp->entry_cnt, ctxp->oxid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) rc = WQE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) goto aerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) ctxp->state = LPFC_NVME_STE_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) ctxp->entry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) goto aerr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) spin_lock_irqsave(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) abts_wqeq = ctxp->wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) abts_wqeq->iocb_cmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) if (!ctxp->hdwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (rc == WQE_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) aerr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) spin_lock_irqsave(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) if (ctxp->flag & LPFC_NVME_CTX_RLS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) list_del_init(&ctxp->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) released = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) ctxp->flag &= ~(LPFC_NVME_ABORT_OP | LPFC_NVME_CTX_RLS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) spin_unlock_irqrestore(&ctxp->ctxlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) atomic_inc(&tgtp->xmt_abort_rsp_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) "6135 Failed to Issue ABTS for oxid x%x. Status x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) "(%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) ctxp->oxid, rc, released);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) if (released)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) }
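^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)  * Flow sketch (unsolicited FCP abort), summarizing the routine above:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)  * there is no solicited WQE to abort, so lpfc_nvmet_unsol_issue_abort()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)  * reuses the context's own iocbq to transmit a BLS ABTS sequence to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)  * initiator; lpfc_nvmet_unsol_fcp_abort_cmp() handles the completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519)  */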
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) * lpfc_nvme_unsol_ls_issue_abort - issue ABTS on an exchange received
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) * via async frame receive where the frame is not handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) * @phba: pointer to adapter structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) * @ctxp: pointer to the asynchronously received sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) * @sid: address of the remote port to send the ABTS to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) * @xri: oxid value for the ABTS (other side's exchange id).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) struct lpfc_async_xchg_ctx *ctxp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) uint32_t sid, uint16_t xri)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) struct lpfc_nvmet_tgtport *tgtp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) struct lpfc_iocbq *abts_wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)
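^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) /* An LS abort is expected only from LS_RCV (request received, no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)  * response issued yet, entry_cnt == 1) or LS_RSP (response issued,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)  * entry_cnt == 2); any other state is logged, but the abort still
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)  * proceeds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538)  */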
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) if ((ctxp->state == LPFC_NVME_STE_LS_RCV && ctxp->entry_cnt == 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) (ctxp->state == LPFC_NVME_STE_LS_RSP && ctxp->entry_cnt == 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) ctxp->state = LPFC_NVME_STE_LS_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) ctxp->entry_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) "6418 NVMET LS abort state mismatch "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) "IO x%x: %d %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) ctxp->oxid, ctxp->state, ctxp->entry_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) ctxp->state = LPFC_NVME_STE_LS_ABORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) if (phba->nvmet_support && phba->targetport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) if (!ctxp->wqeq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) /* Issue ABTS for this WQE based on iotag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) ctxp->wqeq = lpfc_sli_get_iocbq(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) if (!ctxp->wqeq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) "6068 Abort failed: No wqeqs: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) "xri: x%x\n", xri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) /* Do not report a failure for an ABTS request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) kfree(ctxp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) abts_wqeq = ctxp->wqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) rc = WQE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) spin_lock_irqsave(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) abts_wqeq->iocb_cmpl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) if (rc == WQE_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) if (tgtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) atomic_inc(&tgtp->xmt_abort_unsol);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) if (tgtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) atomic_inc(&tgtp->xmt_abort_rsp_error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) abts_wqeq->context2 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) abts_wqeq->context3 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) lpfc_sli_release_iocbq(phba, abts_wqeq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) "6056 Failed to Issue ABTS. Status x%x\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) * lpfc_nvmet_invalidate_host - Invalidate an NVME host connected to this port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) * @phba: pointer to the driver instance bound to an adapter port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) * @ndlp: pointer to an lpfc_nodelist type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) * This routine upcalls the nvmet transport to invalidate an NVME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) * host to which this target instance had active connections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) struct lpfc_nvmet_tgtport *tgtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) "6203 Invalidating hosthandle x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) #if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) /* Need to get the nvmet_fc_target_port pointer here. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) nvmet_fc_invalidate_host(phba->targetport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) }