/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for *
 * Fibre Channel Host Bus Adapters. *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries. *
 * Copyright (C) 2004-2016 Emulex. All rights reserved. *
 * EMULEX and SLI are trademarks of Emulex. *
 * www.broadcom.com *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
 * *
 * This program is free software; you can redistribute it and/or *
 * modify it under the terms of version 2 of the GNU General *
 * Public License as published by the Free Software Foundation. *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package. *
 ********************************************************************/

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

#define LPFC_NVME_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
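
/*
 * Worked example (editorial sketch, assuming 4 KiB pages): 64 data segments
 * of PAGE_SIZE each cover 64 * 4096 = 262144 bytes = 256 KiB per I/O.  The
 * extra "+ 1" segment is assumed to be headroom for the driver/transport and
 * is not counted toward the 256 KiB payload.
 */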

#define LPFC_NVME_ERSP_LEN		0x20

#define LPFC_NVME_WAIT_TMO		10
#define LPFC_NVME_EXPEDITE_XRICNT	8
#define LPFC_NVME_FB_SHIFT		9
#define LPFC_NVME_MAX_FB		(1 << 20)	/* 1M */

#define LPFC_MAX_NVME_INFO_TMP_LEN	100
#define LPFC_NVME_INFO_MORE_STR		"\nCould be more info...\n"

#define lpfc_ndlp_get_nrport(ndlp)					\
	((!ndlp->nrport || (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG))	\
	? NULL : ndlp->nrport)
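
/*
 * Illustrative sketch (not part of the driver API): a caller that needs the
 * NVME rport bound to a node checks the macro result for NULL, since the
 * nrport may already be gone or be waiting on an unregister.  The lock used
 * here (phba->hbalock) is an assumption for the example only.
 *
 *	struct lpfc_nvme_rport *nrport;
 *	struct nvme_fc_remote_port *remoteport = NULL;
 *
 *	spin_lock_irq(&phba->hbalock);
 *	nrport = lpfc_ndlp_get_nrport(ndlp);
 *	if (nrport)
 *		remoteport = nrport->remoteport;
 *	spin_unlock_irq(&phba->hbalock);
 */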

struct lpfc_nvme_qhandle {
	uint32_t index;		/* WQ index to use */
	uint32_t qidx;		/* queue index passed to create */
	uint32_t cpu_id;	/* current cpu id at time of create */
};
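
/*
 * Illustrative sketch (assumption, for orientation only): the qhandle is
 * allocated in the driver's create-queue callback and handed back by the
 * nvme-fc transport on each request as hw_queue_handle, where ->index is
 * used to pick the hardware work queue for that I/O.
 *
 *	struct lpfc_nvme_qhandle *qhandle = hw_queue_handle;
 *	int idx = qhandle->index;	// selects the hdwq/WQ for this I/O
 */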

/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
	struct lpfc_vport *vport;
	struct completion *lport_unreg_cmp;
	/* Add stats counters here */
	atomic_t fc4NvmeLsRequests;
	atomic_t fc4NvmeLsCmpls;
	atomic_t xmt_fcp_noxri;
	atomic_t xmt_fcp_bad_ndlp;
	atomic_t xmt_fcp_qdepth;
	atomic_t xmt_fcp_wqerr;
	atomic_t xmt_fcp_err;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_err;
	atomic_t cmpl_fcp_xb;
	atomic_t cmpl_fcp_err;
	atomic_t cmpl_ls_xb;
	atomic_t cmpl_ls_err;
};
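
/*
 * Illustrative sketch: the counters above are plain atomics, so hot paths
 * bump them with atomic_inc() and the debugfs/sysfs dump side reads a
 * snapshot with atomic_read().  The surrounding dump logic is an assumption
 * for the example; only the field names come from the structure above.
 *
 *	atomic_inc(&lport->xmt_fcp_err);	// on an FCP submit error
 *
 *	u32 ls_reqs  = atomic_read(&lport->fc4NvmeLsRequests);
 *	u32 ls_cmpls = atomic_read(&lport->fc4NvmeLsCmpls);
 */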

struct lpfc_nvme_rport {
	struct lpfc_nvme_lport *lport;
	struct nvme_fc_remote_port *remoteport;
	struct lpfc_nodelist *ndlp;
	struct completion rport_unreg_done;
};

struct lpfc_nvme_fcpreq_priv {
	struct lpfc_io_buf *nvme_buf;
};
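
/*
 * Illustrative sketch: the nvme-fc transport reserves per-request private
 * space of this size (advertised via the port template's fcprqst_priv_sz)
 * and exposes it through the request's ->private pointer; the driver stashes
 * its lpfc_io_buf there so completion and abort paths can find the buffer
 * again.  Variable names are illustrative.
 *
 *	struct lpfc_nvme_fcpreq_priv *freqpriv = pnvme_fcreq->private;
 *
 *	freqpriv->nvme_buf = lpfc_ncmd;		// at submit time
 *	lpfc_ncmd = freqpriv->nvme_buf;		// at completion/abort time
 */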

/*
 * Set the NVME LS request timeout to 30s.  This is longer than the 2*R_A_TOV
 * required by the spec, because the spec value appears to cause problems
 * with some devices.
 */
#define LPFC_NVME_LS_TIMEOUT		30


#define LPFC_NVMET_DEFAULT_SEGS		(64 + 1)	/* 256K IOs */
#define LPFC_NVMET_RQE_MIN_POST		128
#define LPFC_NVMET_RQE_DEF_POST		512
#define LPFC_NVMET_RQE_DEF_COUNT	2048
#define LPFC_NVMET_SUCCESS_LEN		12

#define LPFC_NVMET_MRQ_AUTO		0
#define LPFC_NVMET_MRQ_MAX		16

#define LPFC_NVMET_WAIT_TMO		(5 * MSEC_PER_SEC)
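
/*
 * Illustrative sketch: LPFC_NVMET_WAIT_TMO is expressed in milliseconds
 * (5 * MSEC_PER_SEC), so waiters convert it to jiffies, e.g. when waiting
 * for a targetport unregister completion.  The completion name and logging
 * are assumptions for the example.
 *
 *	wait_ret = wait_for_completion_timeout(&tport_unreg_cmp,
 *				msecs_to_jiffies(LPFC_NVMET_WAIT_TMO));
 *	if (!wait_ret)
 *		lpfc_printf_log(...);	// timed out; continue teardown anyway
 */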

/* Used for NVME Target */
#define LPFC_NVMET_INV_HOST_ACTIVE	1

struct lpfc_nvmet_tgtport {
	struct lpfc_hba *phba;
	struct completion *tport_unreg_cmp;
	atomic_t state;		/* tracks nvmet hosthandle invalidation */

	/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
	atomic_t rcv_ls_req_in;
	atomic_t rcv_ls_req_out;
	atomic_t rcv_ls_req_drop;
	atomic_t xmt_ls_abort;
	atomic_t xmt_ls_abort_cmpl;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp */
	atomic_t xmt_ls_rsp;
	atomic_t xmt_ls_drop;

	/* Stats counters - lpfc_nvmet_xmt_ls_rsp_cmp */
	atomic_t xmt_ls_rsp_error;
	atomic_t xmt_ls_rsp_aborted;
	atomic_t xmt_ls_rsp_xb_set;
	atomic_t xmt_ls_rsp_cmpl;

	/* Stats counters - lpfc_nvmet_unsol_fcp_buffer */
	atomic_t rcv_fcp_cmd_in;
	atomic_t rcv_fcp_cmd_out;
	atomic_t rcv_fcp_cmd_drop;
	atomic_t rcv_fcp_cmd_defer;
	atomic_t xmt_fcp_release;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op */
	atomic_t xmt_fcp_drop;
	atomic_t xmt_fcp_read_rsp;
	atomic_t xmt_fcp_read;
	atomic_t xmt_fcp_write;
	atomic_t xmt_fcp_rsp;

	/* Stats counters - lpfc_nvmet_xmt_fcp_op_cmp */
	atomic_t xmt_fcp_rsp_xb_set;
	atomic_t xmt_fcp_rsp_cmpl;
	atomic_t xmt_fcp_rsp_error;
	atomic_t xmt_fcp_rsp_aborted;
	atomic_t xmt_fcp_rsp_drop;

	/* Stats counters - lpfc_nvmet_xmt_fcp_abort */
	atomic_t xmt_fcp_xri_abort_cqe;
	atomic_t xmt_fcp_abort;
	atomic_t xmt_fcp_abort_cmpl;
	atomic_t xmt_abort_sol;
	atomic_t xmt_abort_unsol;
	atomic_t xmt_abort_rsp;
	atomic_t xmt_abort_rsp_error;

	/* Stats counters - defer IO */
	atomic_t defer_ctx;
	atomic_t defer_fod;
	atomic_t defer_wqfull;
};

struct lpfc_nvmet_ctx_info {
	struct list_head nvmet_ctx_list;
	spinlock_t nvmet_ctx_list_lock;	/* lock per CPU */
	struct lpfc_nvmet_ctx_info *nvmet_ctx_next_cpu;
	struct lpfc_nvmet_ctx_info *nvmet_ctx_start_cpu;
	uint16_t nvmet_ctx_list_cnt;
	char pad[16];	/* pad to a cache-line */
};

/* This retrieves the context info associated with the specified cpu / mrq */
#define lpfc_get_ctx_list(phba, cpu, mrq)				\
	(phba->sli4_hba.nvmet_ctx_info + ((cpu * phba->cfg_nvmet_mrq) + mrq))
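
/*
 * Illustrative sketch: the context info entries form a flat array indexed by
 * (cpu * cfg_nvmet_mrq + mrq), so a receive path grabs the per-CPU, per-MRQ
 * list, takes its lock, and pops a free context.  Names and locking flavor
 * below are illustrative only.
 *
 *	struct lpfc_nvmet_ctx_info *infop;
 *
 *	infop = lpfc_get_ctx_list(phba, current_cpu, mrq_idx);
 *	spin_lock(&infop->nvmet_ctx_list_lock);
 *	if (infop->nvmet_ctx_list_cnt) {
 *		ctx_buf = list_first_entry(&infop->nvmet_ctx_list,
 *					   struct lpfc_nvmet_ctxbuf, list);
 *		list_del_init(&ctx_buf->list);
 *		infop->nvmet_ctx_list_cnt--;
 *	}
 *	spin_unlock(&infop->nvmet_ctx_list_lock);
 */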

/* Values for state field of struct lpfc_async_xchg_ctx */
#define LPFC_NVME_STE_LS_RCV		1
#define LPFC_NVME_STE_LS_ABORT		2
#define LPFC_NVME_STE_LS_RSP		3
#define LPFC_NVME_STE_RCV		4
#define LPFC_NVME_STE_DATA		5
#define LPFC_NVME_STE_ABORT		6
#define LPFC_NVME_STE_DONE		7
#define LPFC_NVME_STE_FREE		0xff
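
/*
 * Rough lifecycle (editor's sketch, not authoritative): an unsolicited LS
 * typically moves LS_RCV -> LS_RSP (or LS_ABORT) -> FREE, while an FCP
 * exchange typically moves RCV -> DATA -> DONE -> FREE, with ABORT possible
 * from the active states.
 */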

/* Values for flag field of struct lpfc_async_xchg_ctx */
#define LPFC_NVME_IO_INP		0x1  /* IO is in progress on exchange */
#define LPFC_NVME_ABORT_OP		0x2  /* Abort WQE issued on exchange */
#define LPFC_NVME_XBUSY			0x4  /* XB bit set on IO cmpl */
#define LPFC_NVME_CTX_RLS		0x8  /* ctx free requested */
#define LPFC_NVME_ABTS_RCV		0x10 /* ABTS received on exchange */
#define LPFC_NVME_CTX_REUSE_WQ		0x20 /* ctx reused via WQ */
#define LPFC_NVME_DEFER_WQFULL		0x40 /* Waiting on a free WQE */
#define LPFC_NVME_TNOTIFY		0x80 /* notify transport of abts */
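
/*
 * Illustrative sketch: flag bits are read-modify-written under ctxlock, the
 * spinlock embedded in lpfc_async_xchg_ctx that protects flag access, e.g.
 * when marking an exchange for abort while checking whether the XRI is
 * still busy.
 *
 *	unsigned long iflag;
 *
 *	spin_lock_irqsave(&ctxp->ctxlock, iflag);
 *	ctxp->flag |= LPFC_NVME_ABORT_OP;
 *	if (ctxp->flag & LPFC_NVME_XBUSY)
 *		ctxp->flag |= LPFC_NVME_CTX_RLS;	// defer ctx free
 *	spin_unlock_irqrestore(&ctxp->ctxlock, iflag);
 */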

struct lpfc_async_xchg_ctx {
	union {
		struct nvmefc_tgt_fcp_req fcp_req;
	} hdlrctx;
	struct list_head list;
	struct lpfc_hba *phba;
	struct lpfc_nodelist *ndlp;
	struct nvmefc_ls_req *ls_req;
	struct nvmefc_ls_rsp ls_rsp;
	struct lpfc_iocbq *wqeq;
	struct lpfc_iocbq *abort_wqeq;
	spinlock_t ctxlock; /* protect flag access */
	uint32_t sid;
	uint32_t offset;
	uint16_t oxid;
	uint16_t size;
	uint16_t entry_cnt;
	uint16_t cpu;
	uint16_t idx;
	uint16_t state;
	uint16_t flag;
	void *payload;
	struct rqb_dmabuf *rqb_buffer;
	struct lpfc_nvmet_ctxbuf *ctxbuf;
	struct lpfc_sli4_hdw_queue *hdwq;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint64_t ts_isr_cmd;
	uint64_t ts_cmd_nvme;
	uint64_t ts_nvme_data;
	uint64_t ts_data_wqput;
	uint64_t ts_isr_data;
	uint64_t ts_data_nvme;
	uint64_t ts_nvme_status;
	uint64_t ts_status_wqput;
	uint64_t ts_isr_status;
	uint64_t ts_status_nvme;
#endif
};
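
/*
 * Illustrative sketch (CONFIG_SCSI_LPFC_DEBUG_FS only): the ts_* fields are
 * raw timestamps taken at each stage of a target I/O, so a per-stage latency
 * is just the difference between consecutive stamps, e.g.:
 *
 *	u64 isr_to_cmd  = ctxp->ts_cmd_nvme    - ctxp->ts_isr_cmd;
 *	u64 cmd_to_data = ctxp->ts_nvme_data   - ctxp->ts_cmd_nvme;
 *	u64 total       = ctxp->ts_status_nvme - ctxp->ts_isr_cmd;
 */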


/* routines found in lpfc_nvme.c */
int __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		struct nvmefc_ls_req *pnvme_lsreq,
		void (*gen_req_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe));
void __lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
int __lpfc_nvme_ls_abort(struct lpfc_vport *vport,
		struct lpfc_nodelist *ndlp, struct nvmefc_ls_req *pnvme_lsreq);
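
/*
 * Illustrative sketch: an LS originator builds an nvmefc_ls_req, issues it
 * through __lpfc_nvme_ls_req() with a completion handler matching the
 * gen_req_cmp prototype above, and may later cancel it with
 * __lpfc_nvme_ls_abort().  The handler name is illustrative.
 *
 *	ret = __lpfc_nvme_ls_req(vport, ndlp, pnvme_lsreq, my_ls_req_cmp);
 *	if (ret)
 *		return ret;
 *	...
 *	__lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);	// on timeout/teardown
 */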

/* routines found in lpfc_nvmet.c */
int lpfc_nvme_unsol_ls_issue_abort(struct lpfc_hba *phba,
			struct lpfc_async_xchg_ctx *ctxp, uint32_t sid,
			uint16_t xri);
int __lpfc_nvme_xmt_ls_rsp(struct lpfc_async_xchg_ctx *axchg,
			struct nvmefc_ls_rsp *ls_rsp,
			void (*xmt_ls_rsp_cmp)(struct lpfc_hba *phba,
				struct lpfc_iocbq *cmdwqe,
				struct lpfc_wcqe_complete *wcqe));
void __lpfc_nvme_xmt_ls_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdwqe, struct lpfc_wcqe_complete *wcqe);
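
/*
 * Illustrative sketch: a received LS is answered by filling in the
 * nvmefc_ls_rsp embedded in the exchange context and handing it to
 * __lpfc_nvme_xmt_ls_rsp() with a completion callback of the prototype
 * above; on failure the caller can escalate to
 * lpfc_nvme_unsol_ls_issue_abort() using the exchange's sid/oxid.  The
 * callback name is illustrative.
 *
 *	rc = __lpfc_nvme_xmt_ls_rsp(axchg, &axchg->ls_rsp, my_ls_rsp_cmp);
 *	if (rc)
 *		lpfc_nvme_unsol_ls_issue_abort(phba, axchg,
 *					       axchg->sid, axchg->oxid);
 */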