^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*******************************************************************
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * This file is part of the Emulex Linux Device Driver for *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Fibre Channel Host Bus Adapters. *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * Copyright (C) 2004-2016 Emulex. All rights reserved. *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * EMULEX and SLI are trademarks of Emulex. *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * www.broadcom.com *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Portions Copyright (C) 2004-2005 Christoph Hellwig *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * This program is free software; you can redistribute it and/or *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * modify it under the terms of version 2 of the GNU General *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * Public License as published by the Free Software Foundation. *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * This program is distributed in the hope that it will be useful. *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * TO BE LEGALLY INVALID. See the GNU General Public License for *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * more details, a copy of which can be found in the file COPYING *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * included with this package. *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) ********************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #include <linux/crc-t10dif.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <net/checksum.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <scsi/scsi_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <scsi/scsi_eh.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <scsi/scsi_transport_fc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #include <scsi/fc/fc_fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #include "lpfc_version.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #include "lpfc_hw4.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #include "lpfc_hw.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) #include "lpfc_sli.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) #include "lpfc_sli4.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #include "lpfc_nl.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #include "lpfc_disc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #include "lpfc.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) #include "lpfc_nvme.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #include "lpfc_scsi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #include "lpfc_logmsg.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #include "lpfc_crtn.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) #include "lpfc_vport.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #include "lpfc_debugfs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) /* NVME initiator-based functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) static struct lpfc_io_buf *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) int idx, int expedite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_io_buf *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) static struct nvme_fc_port_template lpfc_nvme_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) static union lpfc_wqe128 lpfc_iread_cmd_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) static union lpfc_wqe128 lpfc_iwrite_cmd_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) static union lpfc_wqe128 lpfc_icmnd_cmd_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) /* Setup WQE templates for NVME IOs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) lpfc_nvme_cmd_template(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) union lpfc_wqe128 *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) /* IREAD template */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) wqe = &lpfc_iread_cmd_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) memset(wqe, 0, sizeof(union lpfc_wqe128));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) /* Word 0, 1, 2 - BDE is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) /* Word 3 - cmd_buff_len, payload_offset_len is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) /* Word 4 - total_xfer_len is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) /* Word 5 - is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) /* Word 6 - ctxt_tag, xri_tag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) /* Word 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) /* Word 8 - abort_tag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) /* Word 9 - reqtag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) /* Word 10 - dbde, wqes is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) bf_set(wqe_nvme, &wqe->fcp_iread.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) /* Word 11 - pbde is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, NVME_READ_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) /* Word 12 - is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) /* Word 13, 14, 15 - PBDE is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) /* IWRITE template */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) wqe = &lpfc_iwrite_cmd_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) memset(wqe, 0, sizeof(union lpfc_wqe128));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) /* Word 0, 1, 2 - BDE is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) /* Word 3 - cmd_buff_len, payload_offset_len is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) /* Word 4 - total_xfer_len is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) /* Word 5 - initial_xfer_len is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) /* Word 6 - ctxt_tag, xri_tag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) /* Word 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) /* Word 8 - abort_tag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) /* Word 9 - reqtag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) /* Word 10 - dbde, wqes is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) bf_set(wqe_nvme, &wqe->fcp_iwrite.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) /* Word 11 - pbde is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, NVME_WRITE_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) /* Word 12 - is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) /* Word 13, 14, 15 - PBDE is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) /* ICMND template */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) wqe = &lpfc_icmnd_cmd_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) memset(wqe, 0, sizeof(union lpfc_wqe128));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) /* Word 0, 1, 2 - BDE is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) /* Word 3 - payload_offset_len is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) /* Word 4, 5 - is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) /* Word 6 - ctxt_tag, xri_tag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /* Word 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) /* Word 8 - abort_tag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) /* Word 9 - reqtag is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /* Word 10 - dbde, wqes is variable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) /* Word 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, FCP_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /* Word 12, 13, 14, 15 - is zero */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) * @pwqeq: Pointer to command iocb.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) * @xritag: Tag that uniqely identifies the local exchange resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) * @opt: Option bits -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) * bit 0 = inhibit sending abts on the link
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) * This function is called with hbalock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) union lpfc_wqe128 *wqe = &pwqeq->wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /* WQEs are reused. Clear stale data and set key fields to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) memset(wqe, 0, sizeof(*wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) if (opt & INHIBIT_ABORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /* Abort specified xri tag, with the mask deliberately zeroed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) /* Abort the IO associated with this outstanding exchange ID. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) wqe->abort_cmd.wqe_com.abort_tag = xritag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) /* iotag for the wqe completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * lpfc_nvme_create_queue -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) * @pnvme_lport: Transport localport that LS is to be issued from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * @lpfc_pnvme: Pointer to the driver's nvme instance data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * @qidx: An cpu index used to affinitize IO queues and MSIX vectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) * @qsize: Size of the queue in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) * @handle: An opaque driver handle used in follow-up calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * Driver registers this routine to preallocate and initialize any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * internal data structures to bind the @qidx to its internal IO queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) * -EINVAL - Unsupported input value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) * -ENOMEM - Could not alloc necessary memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) unsigned int qidx, u16 qsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) void **handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) struct lpfc_nvme_qhandle *qhandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) char *str;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) if (!pnvme_lport->private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) vport = lport->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) if (qhandle == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) qhandle->cpu_id = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) qhandle->qidx = qidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) * NVME qidx == 0 is the admin queue, so both admin queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) * and first IO queue will use MSI-X vector and associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) if (qidx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) str = "IO "; /* IO queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) qhandle->index = ((qidx - 1) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) lpfc_nvme_template.max_hw_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) str = "ADM"; /* Admin queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) qhandle->index = qidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) "6073 Binding %s HdwQueue %d (cpu %d) to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) "hdw_queue %d qhandle x%px\n", str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) qidx, qhandle->cpu_id, qhandle->index, qhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) *handle = (void *)qhandle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) * lpfc_nvme_delete_queue -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) * @pnvme_lport: Transport localport that LS is to be issued from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) * @qidx: An cpu index used to affinitize IO queues and MSIX vectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) * @handle: An opaque driver handle from lpfc_nvme_create_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) * Driver registers this routine to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) * any internal data structures to bind the @qidx to its internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) * IO queues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) * TODO: What are the failure codes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) unsigned int qidx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) void *handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) if (!pnvme_lport->private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) vport = lport->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) lport, qidx, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) kfree(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) struct lpfc_nvme_lport *lport = localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) "6173 localport x%px delete complete\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) lport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) /* release any threads waiting for the unreg to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) if (lport->vport->localport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) complete(lport->lport_unreg_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) /* lpfc_nvme_remoteport_delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) * @remoteport: Pointer to an nvme transport remoteport instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * This is a template downcall. NVME transport calls this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) * when it has completed the unregistration of a previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) * registered remoteport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) struct lpfc_nvme_rport *rport = remoteport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) ndlp = rport->ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) if (!ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) goto rport_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) vport = ndlp->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (!vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) goto rport_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) /* Remove this rport from the lport's list - memory is owned by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) * transport. Remove the ndlp reference for the NVME transport before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) * calling state machine to remove the node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) "6146 remoteport delete of remoteport x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) remoteport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) spin_lock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) /* The register rebind might have occurred before the delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) * downcall. Guard against this race.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) ndlp->nrport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) /* Remove original register reference. The host transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) * won't reference this rport/remoteport any further.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) rport_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) * lpfc_nvme_handle_lsreq - Process an unsolicited NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) * @axchg: pointer to exchange context for the NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) * This routine is used for processing an asychronously received NVME LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) * request. Any remaining validation is done and the LS is then forwarded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * to the nvme-fc transport via nvme_fc_rcv_ls_req().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * The calling sequence should be: nvme_fc_rcv_ls_req() -> (processing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * -> lpfc_nvme_xmt_ls_rsp/cmp -> req->done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * __lpfc_nvme_xmt_ls_rsp_cmp should free the allocated axchg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * Returns 0 if LS was handled and delivered to the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) * Returns 1 if LS failed to be handled and should be dropped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) lpfc_nvme_handle_lsreq(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) struct lpfc_async_xchg_ctx *axchg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) #if (IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) struct lpfc_nvme_rport *lpfc_rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) struct nvme_fc_remote_port *remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) uint32_t *payload = axchg->payload;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) vport = axchg->ndlp->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) lpfc_rport = axchg->ndlp->nrport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) if (!lpfc_rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) remoteport = lpfc_rport->remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) if (!vport->localport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) lport = vport->localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) if (!lport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) rc = nvme_fc_rcv_ls_req(remoteport, &axchg->ls_rsp, axchg->payload,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) axchg->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) "6205 NVME Unsol rcv: sz %d rc %d: %08x %08x %08x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) "%08x %08x %08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) axchg->size, rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) *payload, *(payload+1), *(payload+2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) *(payload+3), *(payload+4), *(payload+5));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * __lpfc_nvme_ls_req_cmp - Generic completion handler for a NVME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * LS request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) * @phba: Pointer to HBA context object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * @vport: The local port that issued the LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * @cmdwqe: Pointer to driver command WQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * @wcqe: Pointer to driver response CQE object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * This function is the generic completion handler for NVME LS requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) * The function updates any states and statistics, calls the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) * ls_req done() routine, then tears down the command and buffers used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) * for the LS request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) **/
void
__lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_vport *vport,
		       struct lpfc_iocbq *cmdwqe,
		       struct lpfc_wcqe_complete *wcqe)
{
	struct nvmefc_ls_req *pnvme_lsreq;
	struct lpfc_dmabuf *buf_ptr;
	struct lpfc_nodelist *ndlp;
	uint32_t status;

	/* The LS request and node pointers were stashed in the WQE contexts
	 * when the request was issued (see lpfc_nvme_gen_req).
	 */
	pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
	ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
	status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			 "6047 NVMEx LS REQ %px cmpl DID %x Xri: %x "
			 "status %x reason x%x cmd:x%px lsreg:x%px bmp:x%px "
			 "ndlp:x%px\n",
			 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
			 cmdwqe->sli4_xritag, status,
			 (wcqe->parameter & 0xffff),
			 cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

	lpfc_nvmeio_data(phba, "NVMEx LS CMPL: xri x%x stat x%x parm x%x\n",
			 cmdwqe->sli4_xritag, status, wcqe->parameter);

	/* Free the BPL wrapper buffer; the actual request/response data
	 * buffers are owned and freed by the nvme-fc transport.
	 */
	if (cmdwqe->context3) {
		buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		cmdwqe->context3 = NULL;
	}
	/* Notify the transport of the LS completion status */
	if (pnvme_lsreq->done)
		pnvme_lsreq->done(pnvme_lsreq, status);
	else
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6046 NVMEx cmpl without done call back? "
				 "Data %px DID %x Xri: %x status %x\n",
				 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
				 cmdwqe->sli4_xritag, status);
	/* Drop the node reference taken when the WQE was issued */
	if (ndlp) {
		lpfc_nlp_put(ndlp);
		cmdwqe->context1 = NULL;
	}
	lpfc_sli_release_iocbq(phba, cmdwqe);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) lpfc_nvme_ls_req_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) struct lpfc_wcqe_complete *wcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) struct lpfc_vport *vport = cmdwqe->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) uint32_t status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) if (vport->localport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) lport = (struct lpfc_nvme_lport *)vport->localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) if (lport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) atomic_inc(&lport->fc4NvmeLsCmpls);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) if (bf_get(lpfc_wcqe_c_xb, wcqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) atomic_inc(&lport->cmpl_ls_xb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) atomic_inc(&lport->cmpl_ls_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) __lpfc_nvme_ls_req_cmp(phba, vport, cmdwqe, wcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
/**
 * lpfc_nvme_gen_req - Build and issue a GEN_REQUEST64 WQE for an NVME LS
 * @vport: The local port issuing the LS
 * @bmp: DMA buffer holding the BPL (request + response BDEs)
 * @inp: DMA buffer describing the request payload (unused here; the BPL
 *       in @bmp already carries the payload addresses)
 * @pnvme_lsreq: Pointer to the transport LS request structure
 * @cmpl: Completion handler to invoke when the WQE completes
 * @ndlp: The remote port to send the request to
 * @num_entry: Number of BDEs in the BPL
 * @tmo: Timeout in seconds (0 selects the 3 * RATOV default)
 * @retry: Retry count to record on the WQE
 *
 * Allocates an iocbq, fills in the GEN_REQUEST64 work queue entry word
 * by word, and posts it to hardware queue 0. A node reference is taken
 * and stashed in context1; the completion handler is expected to release
 * it along with the BPL wrapper in context3.
 *
 * Returns 0 on success; 1 if the iocbq could not be allocated or the
 * WQE could not be issued.
 **/
static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
		  struct lpfc_dmabuf *inp,
		  struct nvmefc_ls_req *pnvme_lsreq,
		  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			       struct lpfc_wcqe_complete *),
		  struct lpfc_nodelist *ndlp, uint32_t num_entry,
		  uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	union lpfc_wqe128 *wqe;
	struct lpfc_iocbq *genwqe;
	struct ulp_bde64 *bpl;
	struct ulp_bde64 bde;
	int i, rc, xmit_len, first_len;

	/* Allocate buffer for command WQE */
	genwqe = lpfc_sli_get_iocbq(phba);
	if (genwqe == NULL)
		return 1;

	wqe = &genwqe->wqe;
	/* Initialize only 64 bytes - the base WQE portion of the
	 * 128-byte union; the rest is not used by GEN_REQUEST64.
	 */
	memset(wqe, 0, sizeof(union lpfc_wqe));

	genwqe->context3 = (uint8_t *)bmp;
	genwqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Save for completion so we can release these resources */
	genwqe->context1 = lpfc_nlp_get(ndlp);
	genwqe->context2 = (uint8_t *)pnvme_lsreq;
	/* Fill in payload, bp points to frame payload */

	if (!tmo)
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);

	/* For this command calculate the xmit length of the request bde.
	 * Walk the BPL until a non-64-bit BDE is found; the first entry's
	 * size is the request payload length placed in the WQE.
	 */
	xmit_len = 0;
	first_len = 0;
	bpl = (struct ulp_bde64 *)bmp->virt;
	for (i = 0; i < num_entry; i++) {
		bde.tus.w = bpl[i].tus.w;
		if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
			break;
		xmit_len += bde.tus.f.bdeSize;
		if (i == 0)
			first_len = xmit_len;
	}

	genwqe->rsvd2 = num_entry;
	genwqe->hba_wqidx = 0;

	/* Words 0 - 2 : first (request) BDE embedded in the WQE */
	wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->generic.bde.tus.f.bdeSize = first_len;
	wqe->generic.bde.addrLow = bpl[0].addrLow;
	wqe->generic.bde.addrHigh = bpl[0].addrHigh;

	/* Word 3 */
	wqe->gen_req.request_payload_len = first_len;

	/* Word 4 */

	/* Word 5 : R_CTL/TYPE mark this as an NVME ELS4 request frame */
	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

	/* Word 7
	 * NOTE(review): the hardware timeout is set from fc_ratov-1, not
	 * from the caller's tmo; tmo only feeds drvrTimeout below. Confirm
	 * whether this asymmetry is intentional.
	 */
	bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
	bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
	bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
	bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

	/* Word 8 */
	wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
	bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);


	/* Issue GEN REQ WQE for NPORT <did> */
	genwqe->wqe_cmpl = cmpl;
	genwqe->iocb_cmpl = NULL;
	genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	genwqe->vport = vport;
	genwqe->retry = retry;

	lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
			 genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

	rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
	if (rc) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6045 Issue GEN REQ WQE to NPORT x%x "
				 "Data: x%x x%x  rc x%x\n",
				 ndlp->nlp_DID, genwqe->iotag,
				 vport->port_state, rc);
		lpfc_sli_release_iocbq(phba, genwqe);
		return 1;
	}

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_ELS,
			 "6050 Issue GEN REQ WQE to NPORT x%x "
			 "Data: oxid: x%x state: x%x wq:x%px lsreq:x%px "
			 "bmp:x%px xmit:%d 1st:%d\n",
			 ndlp->nlp_DID, genwqe->sli4_xritag,
			 vport->port_state,
			 genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) * __lpfc_nvme_ls_req - Generic service routine to issue an NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) * @vport: The local port issuing the LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) * @ndlp: The remote port to send the LS to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) * @pnvme_lsreq: Pointer to LS request structure from the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) * @gen_req_cmp: Completion call-back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) * Routine validates the ndlp, builds buffers and sends a GEN_REQUEST
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) * WQE to perform the LS operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * non-zero: various error codes, in form of -Exxx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) **/
int
__lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
		   struct nvmefc_ls_req *pnvme_lsreq,
		   void (*gen_req_cmp)(struct lpfc_hba *phba,
				       struct lpfc_iocbq *cmdwqe,
				       struct lpfc_wcqe_complete *wcqe))
{
	struct lpfc_dmabuf *bmp;
	struct ulp_bde64 *bpl;
	int ret;
	uint16_t ntype, nstate;

	/* The node must exist and be active before an LS can be sent */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6051 NVMEx LS REQ: Bad NDLP x%px, Failing "
				 "LS Req\n",
				 ndlp);
		return -ENODEV;
	}

	/* Targets must be MAPPED and initiators UNMAPPED to accept IO */
	ntype = ndlp->nlp_type;
	nstate = ndlp->nlp_state;
	if ((ntype & NLP_NVME_TARGET && nstate != NLP_STE_MAPPED_NODE) ||
	    (ntype & NLP_NVME_INITIATOR && nstate != NLP_STE_UNMAPPED_NODE)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6088 NVMEx LS REQ: Fail DID x%06x not "
				 "ready for IO. Type x%x, State x%x\n",
				 ndlp->nlp_DID, ntype, nstate);
		return -ENODEV;
	}

	/* The NVME LS work queue must have been created at init time */
	if (!vport->phba->sli4_hba.nvmels_wq)
		return -ENOMEM;

	/*
	 * there are two dma buf in the request, actually there is one and
	 * the second one is just the start address + cmd size.
	 * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
	 * in a lpfc_dmabuf struct. When freeing we just free the wrapper
	 * because the nvme layer owns the data bufs.
	 * We do not have to break these packets open, we don't care what is
	 * in them. And we do not have to look at the response data, we only
	 * care that we got a response. All of the caring is going to happen
	 * in the nvme-fc layer.
	 */

	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6044 NVMEx LS REQ: Could not alloc LS buf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		return -ENOMEM;
	}

	bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6042 NVMEx LS REQ: Could not alloc mbuf "
				 "for DID %x\n",
				 ndlp->nlp_DID);
		kfree(bmp);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&bmp->list);

	/* Build a two-entry BPL: request buffer first, response second.
	 * The DMA addresses come from the transport's pre-mapped buffers.
	 */
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);
	bpl++;

	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
	bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
	bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
			"6149 NVMEx LS REQ: Issue to DID 0x%06x lsreq x%px, "
			"rqstlen:%d rsplen:%d %pad %pad\n",
			ndlp->nlp_DID, pnvme_lsreq, pnvme_lsreq->rqstlen,
			pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
			&pnvme_lsreq->rspdma);

	/* On success lpfc_nvme_gen_req owns bmp; on failure free it here */
	ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
				pnvme_lsreq, gen_req_cmp, ndlp, 2,
				LPFC_NVME_LS_TIMEOUT, 0);
	if (ret != WQE_SUCCESS) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6052 NVMEx REQ: EXIT. issue ls wqe failed "
				 "lsreq x%px Status %x DID %x\n",
				 pnvme_lsreq, ret, ndlp->nlp_DID);
		lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
		kfree(bmp);
		return -EIO;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * lpfc_nvme_ls_req - Issue an NVME Link Service request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * @pnvme_lport: Transport localport that LS is to be issued from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * @nvme_rport: Transport remoteport that LS is to be sent to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) * Driver registers this routine to handle any link service request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * from the nvme_fc transport to a remote nvme-aware port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * non-zero: various error codes, in form of -Exxx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) struct nvme_fc_remote_port *pnvme_rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) struct nvmefc_ls_req *pnvme_lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct lpfc_nvme_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (unlikely(!lport) || unlikely(!rport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) vport = lport->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) if (vport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) atomic_inc(&lport->fc4NvmeLsRequests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) ret = __lpfc_nvme_ls_req(vport, rport->ndlp, pnvme_lsreq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) lpfc_nvme_ls_req_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) atomic_inc(&lport->xmt_ls_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) * __lpfc_nvme_ls_abort - Generic service routine to abort a prior
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * @vport: The local port that issued the LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) * @ndlp: The remote port the LS was sent to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) * @pnvme_lsreq: Pointer to LS request structure from the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) * The driver validates the ndlp, looks for the LS, and aborts the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) * LS if found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * 0 : if LS found and aborted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) * non-zero: various error conditions in form -Exxx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) __lpfc_nvme_ls_abort(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) struct nvmefc_ls_req *pnvme_lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) struct lpfc_iocbq *wqe, *next_wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) bool foundit = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) if (!ndlp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) "6049 NVMEx LS REQ Abort: Bad NDLP x%px DID "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) "x%06x, Failing LS Req\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ndlp, ndlp ? ndlp->nlp_DID : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) "6040 NVMEx LS REQ Abort: Issue LS_ABORT for lsreq "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) "x%p rqstlen:%d rsplen:%d %pad %pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) pnvme_lsreq, pnvme_lsreq->rqstlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) &pnvme_lsreq->rspdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * Lock the ELS ring txcmplq and look for the wqe that matches
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * this ELS. If found, issue an abort on the wqe.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pring = phba->sli4_hba.nvmels_wq->pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) spin_lock(&pring->ring_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) if (wqe->context2 == pnvme_lsreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) foundit = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) spin_unlock(&pring->ring_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (foundit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) lpfc_sli_issue_abort_iotag(phba, pring, wqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) if (foundit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) "6213 NVMEx LS REQ Abort: Unable to locate req x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) pnvme_lsreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) lpfc_nvme_xmt_ls_rsp(struct nvme_fc_local_port *localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) struct nvme_fc_remote_port *remoteport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) struct nvmefc_ls_rsp *ls_rsp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) struct lpfc_async_xchg_ctx *axchg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) container_of(ls_rsp, struct lpfc_async_xchg_ctx, ls_rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) if (axchg->phba->pport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) lport = (struct lpfc_nvme_lport *)localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) rc = __lpfc_nvme_xmt_ls_rsp(axchg, ls_rsp, __lpfc_nvme_xmt_ls_rsp_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) * unless the failure is due to having already sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) * the response, an abort will be generated for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * exchange if the rsp can't be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) if (rc != -EALREADY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) atomic_inc(&lport->xmt_ls_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) * lpfc_nvme_ls_abort - Abort a prior NVME LS request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * @pnvme_lport: Transport localport that LS is to be issued from.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) * @pnvme_rport: Transport remoteport that LS is to be sent to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * @pnvme_lsreq: the transport nvme_ls_req structure for the LS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * Driver registers this routine to abort a NVME LS request that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * in progress (from the transports perspective).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct nvme_fc_remote_port *pnvme_rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) struct nvmefc_ls_req *pnvme_lsreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) if (unlikely(!lport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) vport = lport->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (vport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) ret = __lpfc_nvme_ls_abort(vport, ndlp, pnvme_lsreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) atomic_inc(&lport->xmt_ls_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) /* Fix up the existing sgls for NVME IO. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) struct lpfc_io_buf *lpfc_ncmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) struct nvmefc_fcp_req *nCmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct sli4_sge *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) union lpfc_wqe128 *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) uint32_t *wptr, *dptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) * Get a local pointer to the built-in wqe and correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) * the cmd size to match NVME's 96 bytes and fix
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) * the dma address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) wqe = &lpfc_ncmd->cur_iocbq.wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * match NVME. NVME sends 96 bytes. Also, use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * nvme commands command and response dma addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * rather than the virtual memory to ease the restore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) sgl = lpfc_ncmd->dma_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) if (phba->cfg_nvme_embed_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) sgl->addr_hi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) sgl->addr_lo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) /* Word 0-2 - NVME CMND IU (embedded payload) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) wqe->generic.bde.tus.f.bdeSize = 56;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) wqe->generic.bde.addrHigh = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) wqe->generic.bde.addrLow = 64; /* Word 16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) /* Word 10 - dbde is 0, wqes is 1 in template */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * Embed the payload in the last half of the WQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) * WQE words 16-30 get the NVME CMD IU payload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * WQE words 16-19 get payload Words 1-4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * WQE words 20-21 get payload Words 6-7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * WQE words 22-29 get payload Words 16-23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) wptr = &wqe->words[16]; /* WQE ptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) dptr++; /* Skip Word 0 in payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) *wptr++ = *dptr++; /* Word 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) *wptr++ = *dptr++; /* Word 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) *wptr++ = *dptr++; /* Word 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) *wptr++ = *dptr++; /* Word 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) dptr++; /* Skip Word 5 in payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) *wptr++ = *dptr++; /* Word 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) *wptr++ = *dptr++; /* Word 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) dptr += 8; /* Skip Words 8-15 in payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) *wptr++ = *dptr++; /* Word 16 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) *wptr++ = *dptr++; /* Word 17 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) *wptr++ = *dptr++; /* Word 18 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) *wptr++ = *dptr++; /* Word 19 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) *wptr++ = *dptr++; /* Word 20 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) *wptr++ = *dptr++; /* Word 21 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) *wptr++ = *dptr++; /* Word 22 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) *wptr = *dptr; /* Word 23 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->cmddma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->cmddma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /* Word 0-2 - NVME CMND IU Inline BDE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) wqe->generic.bde.tus.f.bdeSize = nCmd->cmdlen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) wqe->generic.bde.addrHigh = sgl->addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) wqe->generic.bde.addrLow = sgl->addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /* Word 10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* Setup the physical region for the FCP RSP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) sgl->word2 = le32_to_cpu(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) if (nCmd->sg_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) bf_set(lpfc_sli4_sge_last, sgl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) bf_set(lpfc_sli4_sge_last, sgl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) sgl->sge_len = cpu_to_le32(nCmd->rsplen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * Driver registers this routine as it io request handler. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * data structure to the rport indicated in @lpfc_nvme_rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * TODO: What are the failure codes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) struct lpfc_wcqe_complete *wcqe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) struct lpfc_io_buf *lpfc_ncmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) (struct lpfc_io_buf *)pwqeIn->context1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct lpfc_vport *vport = pwqeIn->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) struct nvmefc_fcp_req *nCmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct nvme_fc_ersp_iu *ep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct nvme_fc_cmd_iu *cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct lpfc_nvme_fcpreq_priv *freqpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) uint32_t code, status, idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) uint16_t cid, sqhd, data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) uint32_t *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) /* Sanity check on return of outstanding command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) if (!lpfc_ncmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) "6071 Null lpfc_ncmd pointer. No "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) "release, skip completion\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) /* Guard against abort handler being called at same time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) spin_lock(&lpfc_ncmd->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) if (!lpfc_ncmd->nvmeCmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) spin_unlock(&lpfc_ncmd->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) "6066 Missing cmpl ptrs: lpfc_ncmd x%px, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) "nvmeCmd x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) lpfc_ncmd, lpfc_ncmd->nvmeCmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /* Release the lpfc_ncmd regardless of the missing elements. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) lpfc_release_nvme_buf(phba, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) nCmd = lpfc_ncmd->nvmeCmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) status = bf_get(lpfc_wcqe_c_status, wcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (unlikely(status && vport->localport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) lport = (struct lpfc_nvme_lport *)vport->localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) if (lport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) if (bf_get(lpfc_wcqe_c_xb, wcqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) atomic_inc(&lport->cmpl_fcp_xb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) atomic_inc(&lport->cmpl_fcp_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) lpfc_ncmd->cur_iocbq.sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) status, wcqe->parameter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * Catch race where our node has transitioned, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * transport is still transitioning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) ndlp = lpfc_ncmd->ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) "6062 Ignoring NVME cmpl. No ndlp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) goto out_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) code = bf_get(lpfc_wcqe_c_code, wcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) if (code == CQE_CODE_NVME_ERSP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /* For this type of CQE, we need to rebuild the rsp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * Get Command Id from cmd to plug into response. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * code is not needed in the next NVME Transport drop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) cid = cp->sqe.common.command_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) * RSN is in CQE word 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) * SQHD is in CQE Word 3 bits 15:0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * Cmd Specific info is in CQE Word 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) * and in CQE Word 0 bits 15:0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) /* Now lets build the NVME ERSP IU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) ep->iu_len = cpu_to_be16(8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) ep->rsn = wcqe->parameter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ep->rsvd12 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) ptr = (uint32_t *)&ep->cqe.result.u64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) *ptr++ = wcqe->total_data_placed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) *ptr = (uint32_t)data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ep->cqe.sq_head = sqhd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ep->cqe.sq_id = nCmd->sqid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) ep->cqe.command_id = cid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) ep->cqe.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) lpfc_ncmd->status = IOSTAT_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) lpfc_ncmd->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) nCmd->transferred_length = nCmd->payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) lpfc_ncmd->status = (status & LPFC_IOCB_STATUS_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) lpfc_ncmd->result = (wcqe->parameter & IOERR_PARAM_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /* For NVME, the only failure path that results in an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * IO error is when the adapter rejects it. All other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * conditions are a success case and resolved by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) * transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) * IOSTAT_FCP_RSP_ERROR means:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) * 1. Length of data received doesn't match total
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) * transfer length in WQE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * 2. If the RSP payload does NOT match these cases:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) * a. RSP length 12/24 bytes and all zeros
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) * b. NVME ERSP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) switch (lpfc_ncmd->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) case IOSTAT_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) nCmd->transferred_length = wcqe->total_data_placed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) nCmd->rcv_rsplen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) nCmd->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) case IOSTAT_FCP_RSP_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) nCmd->transferred_length = wcqe->total_data_placed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) nCmd->rcv_rsplen = wcqe->parameter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) nCmd->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) /* Sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) "6081 NVME Completion Protocol Error: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) "xri %x status x%x result x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) "placed x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) lpfc_ncmd->cur_iocbq.sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) lpfc_ncmd->status, lpfc_ncmd->result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) wcqe->total_data_placed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) case IOSTAT_LOCAL_REJECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) /* Let fall through to set command final state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) lpfc_printf_vlog(vport, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) "6032 Delay Aborted cmd x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) "nvme cmd x%px, xri x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) "xb %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) lpfc_ncmd, nCmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) lpfc_ncmd->cur_iocbq.sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) bf_get(lpfc_wcqe_c_xb, wcqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) out_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) "6072 NVME Completion Error: xri %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) "status x%x result x%x [x%x] "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) "placed x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) lpfc_ncmd->cur_iocbq.sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) lpfc_ncmd->status, lpfc_ncmd->result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) wcqe->parameter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) wcqe->total_data_placed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) nCmd->transferred_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) nCmd->rcv_rsplen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) nCmd->status = NVME_SC_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) /* pick up SLI4 exhange busy condition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (bf_get(lpfc_wcqe_c_xb, wcqe))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) /* Update stats and complete the IO. There is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * no need for dma unprep because the nvme_transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) * owns the dma address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) if (lpfc_ncmd->ts_cmd_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) lpfc_ncmd->ts_data_io = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) lpfc_io_ktime(phba, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) if (lpfc_ncmd->cpu != cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) lpfc_printf_vlog(vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) "6701 CPU Check cmpl: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) "cpu %d expect %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) cpu, lpfc_ncmd->cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* NVME targets need completion held off until the abort exchange
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) * completes unless the NVME Rport is getting unregistered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) if (!(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) freqpriv = nCmd->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) freqpriv->nvme_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) lpfc_ncmd->nvmeCmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) spin_unlock(&lpfc_ncmd->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) nCmd->done(nCmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) spin_unlock(&lpfc_ncmd->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) /* Call release with XB=1 to queue the IO into the abort list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) lpfc_release_nvme_buf(phba, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) * lpfc_nvme_prep_io_cmd - Issue an NVME-over-FCP IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * @vport: pointer to a host virtual N_Port data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) * @lpfcn_cmd: Pointer to lpfc scsi command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * @pnode: pointer to a node-list data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * @cstat: pointer to the control status structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * Driver registers this routine as it io request handler. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) * data structure to the rport indicated in @lpfc_nvme_rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) * TODO: What are the failure codes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) struct lpfc_io_buf *lpfc_ncmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct lpfc_nodelist *pnode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) struct lpfc_fc4_ctrl_stat *cstat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) union lpfc_wqe128 *wqe = &pwqeq->wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) uint32_t req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (!NLP_CHK_NODE_ACT(pnode))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) * There are three possibilities here - use scatter-gather segment, use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) * the single mapping, or neither.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) if (nCmd->sg_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /* From the iwrite template, initialize words 7 - 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) memcpy(&wqe->words[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) &lpfc_iwrite_cmd_template.words[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) sizeof(uint32_t) * 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) /* Word 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) wqe->fcp_iwrite.total_xfer_len = nCmd->payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) /* Word 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) if ((phba->cfg_nvme_enable_fb) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) (pnode->nlp_flag & NLP_FIRSTBURST)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) req_len = lpfc_ncmd->nvmeCmd->payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (req_len < pnode->nvme_fb_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) wqe->fcp_iwrite.initial_xfer_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) req_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) wqe->fcp_iwrite.initial_xfer_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) pnode->nvme_fb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) wqe->fcp_iwrite.initial_xfer_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) cstat->output_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* From the iread template, initialize words 7 - 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) memcpy(&wqe->words[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) &lpfc_iread_cmd_template.words[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) sizeof(uint32_t) * 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* Word 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /* Word 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) wqe->fcp_iread.rsrvd5 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) cstat->input_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* From the icmnd template, initialize words 4 - 11 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) memcpy(&wqe->words[4], &lpfc_icmnd_cmd_template.words[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) sizeof(uint32_t) * 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) cstat->control_requests++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) if (pnode->nlp_nvme_info & NLP_NVME_NSLER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) bf_set(wqe_erp, &wqe->generic.wqe_com, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * Finish initializing those WQE fields that are independent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * of the nvme_cmnd request_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) /* Word 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) bf_set(payload_offset_len, &wqe->fcp_icmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) (nCmd->rsplen + nCmd->cmdlen));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /* Word 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) /* Word 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) wqe->generic.wqe_com.abort_tag = pwqeq->iotag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) /* Word 9 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) /* Words 13 14 15 are for PBDE support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) pwqeq->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * lpfc_nvme_prep_io_dma - Issue an NVME-over-FCP IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * @vport: pointer to a host virtual N_Port data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * @lpfcn_cmd: Pointer to lpfc scsi command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * Driver registers this routine as it io request handler. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) * data structure to the rport indicated in @lpfc_nvme_rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * TODO: What are the failure codes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct lpfc_io_buf *lpfc_ncmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) struct sli4_hybrid_sgl *sgl_xtra = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) struct scatterlist *data_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) struct sli4_sge *first_data_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct ulp_bde64 *bde;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) dma_addr_t physaddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) uint32_t num_bde = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) uint32_t dma_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) uint32_t dma_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) int nseg, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) bool lsp_just_set = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /* Fix up the command and response DMA stuff. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) * There are three possibilities here - use scatter-gather segment, use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * the single mapping, or neither.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) if (nCmd->sg_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * Jump over the cmd and rsp SGEs. The fix routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * has already adjusted for this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) sgl += 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) first_data_sgl = sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (lpfc_ncmd->seg_cnt > lpfc_nvme_template.max_sgl_segments) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) "6058 Too many sg segments from "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) "NVME Transport. Max %d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) "nvmeIO sg_cnt %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) phba->cfg_nvme_seg_cnt + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) lpfc_ncmd->seg_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) lpfc_ncmd->seg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) * The driver established a maximum scatter-gather segment count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) * during probe that limits the number of sg elements in any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) * single nvme command. Just run through the seg_cnt and format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) * the sge's.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) nseg = nCmd->sg_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) data_sg = nCmd->first_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) /* for tracking the segment boundaries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) j = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) for (i = 0; i < nseg; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) if (data_sg == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) "6059 dptr err %d, nseg %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) i, nseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) lpfc_ncmd->seg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) sgl->word2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) if ((num_bde + 1) == nseg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) bf_set(lpfc_sli4_sge_last, sgl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) bf_set(lpfc_sli4_sge_type, sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) LPFC_SGE_TYPE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) bf_set(lpfc_sli4_sge_last, sgl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* expand the segment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (!lsp_just_set &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) !((j + 1) % phba->border_sge_num) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) ((nseg - 1) != i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) /* set LSP type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) bf_set(lpfc_sli4_sge_type, sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) LPFC_SGE_TYPE_LSP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) sgl_xtra = lpfc_get_sgl_per_hdwq(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) phba, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) if (unlikely(!sgl_xtra)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) lpfc_ncmd->seg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) sgl->addr_lo = cpu_to_le32(putPaddrLow(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) sgl_xtra->dma_phys_sgl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) sgl->addr_hi = cpu_to_le32(putPaddrHigh(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) sgl_xtra->dma_phys_sgl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) bf_set(lpfc_sli4_sge_type, sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) LPFC_SGE_TYPE_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) if (!(bf_get(lpfc_sli4_sge_type, sgl) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) LPFC_SGE_TYPE_LSP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if ((nseg - 1) == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) bf_set(lpfc_sli4_sge_last, sgl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) physaddr = data_sg->dma_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) dma_len = data_sg->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) sgl->addr_lo = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) putPaddrLow(physaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) sgl->addr_hi = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) putPaddrHigh(physaddr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) sgl->sge_len = cpu_to_le32(dma_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) dma_offset += dma_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) data_sg = sg_next(data_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) lsp_just_set = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) sgl->sge_len = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) phba->cfg_sg_dma_buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) sgl = (struct sli4_sge *)sgl_xtra->dma_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) i = i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) lsp_just_set = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) if (phba->cfg_enable_pbde) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) /* Use PBDE support for first SGL only, offset == 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) /* Words 13-15 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) bde = (struct ulp_bde64 *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) &wqe->words[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) bde->addrLow = first_data_sgl->addr_lo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) bde->addrHigh = first_data_sgl->addr_hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) bde->tus.f.bdeSize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) le32_to_cpu(first_data_sgl->sge_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) bde->tus.w = cpu_to_le32(bde->tus.w);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /* wqe_pbde is 1 in template */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) lpfc_ncmd->seg_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) /* For this clause to be valid, the payload_length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * and sg_cnt must zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (nCmd->payload_length != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) "6063 NVME DMA Prep Err: sg_cnt %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) "payload_length x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) nCmd->sg_cnt, nCmd->payload_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) * @lpfc_pnvme: Pointer to the driver's nvme instance data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) * @lpfc_nvme_lport: Pointer to the driver's local port data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) * Driver registers this routine as it io request handler. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) * routine issues an fcp WQE with data from the @lpfc_nvme_fcpreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) * data structure to the rport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) indicated in @lpfc_nvme_rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) * 0 - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) * TODO: What are the failure codes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) struct nvme_fc_remote_port *pnvme_rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) void *hw_queue_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) struct nvmefc_fcp_req *pnvme_fcreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) int expedite = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) int idx, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) struct lpfc_fc4_ctrl_stat *cstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) struct lpfc_io_buf *lpfc_ncmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) struct lpfc_nvme_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) struct lpfc_nvme_qhandle *lpfc_queue_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) struct lpfc_nvme_fcpreq_priv *freqpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) struct nvme_common_command *sqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) uint64_t start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) /* Validate pointers. LLDD fault handling with transport does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) * have timing races.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) if (unlikely(!lport)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) vport = lport->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (unlikely(!hw_queue_handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) "6117 Fail IO, NULL hw_queue_handle\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) atomic_inc(&lport->xmt_fcp_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) if (unlikely(vport->load_flag & FC_UNLOADING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) "6124 Fail IO, Driver unload\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) atomic_inc(&lport->xmt_fcp_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) freqpriv = pnvme_fcreq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) if (unlikely(!freqpriv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) "6158 Fail IO, NULL request data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) atomic_inc(&lport->xmt_fcp_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) if (phba->ktime_on)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) start = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * Catch race where our node has transitioned, but the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * transport is still transitioning.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) ndlp = rport->ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) "6053 Busy IO, ndlp not ready: rport x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) "ndlp x%px, DID x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) rport, ndlp, pnvme_rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) atomic_inc(&lport->xmt_fcp_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) /* The remote node has to be a mapped target or it's an error. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) "6036 Fail IO, DID x%06x not ready for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) "IO. State x%x, Type x%x Flg x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) pnvme_rport->port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) ndlp->nlp_state, ndlp->nlp_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) ndlp->upcall_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) atomic_inc(&lport->xmt_fcp_bad_ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) /* Currently only NVME Keep alive commands should be expedited
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) * if the driver runs out of a resource. These should only be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) * issued on the admin queue, qidx 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (!lpfc_queue_info->qidx && !pnvme_fcreq->sg_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) sqe = &((struct nvme_fc_cmd_iu *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) pnvme_fcreq->cmdaddr)->sqe.common;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) if (sqe->opcode == nvme_admin_keep_alive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) expedite = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) /* The node is shared with FCP IO, make sure the IO pending count does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) * not exceed the programmed depth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) !expedite) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) "6174 Fail IO, ndlp qdepth exceeded: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) "idx %d DID %x pend %d qdepth %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) lpfc_queue_info->index, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) atomic_read(&ndlp->cmd_pending),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) ndlp->cmd_qdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) atomic_inc(&lport->xmt_fcp_qdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) /* Lookup Hardware Queue index based on fcp_io_sched module parameter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) idx = lpfc_queue_info->index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) idx = phba->sli4_hba.cpu_map[cpu].hdwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, idx, expedite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) if (lpfc_ncmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) atomic_inc(&lport->xmt_fcp_noxri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) "6065 Fail IO, driver buffer pool is empty: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) "idx %d DID %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) lpfc_queue_info->index, ndlp->nlp_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) goto out_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) if (start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) lpfc_ncmd->ts_cmd_start = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) lpfc_ncmd->ts_cmd_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * Store the data needed by the driver to issue, abort, and complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * an IO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) * Do not let the IO hang out forever. There is no midlayer issuing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) * an abort so inform the FW of the maximum IO pending time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) freqpriv->nvme_buf = lpfc_ncmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) lpfc_ncmd->nvmeCmd = pnvme_fcreq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) lpfc_ncmd->ndlp = ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) lpfc_ncmd->qidx = lpfc_queue_info->qidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) * Issue the IO on the WQ indicated by index in the hw_queue_handle.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) * This identfier was create in our hardware queue create callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) * routine. The driver now is dependent on the IO queue steering from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) * the transport. We are trusting the upper NVME layers know which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) * index to use and that they have affinitized a CPU to this hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) lpfc_ncmd->cur_iocbq.hba_wqidx = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) cstat = &phba->sli4_hba.hdwq[idx].nvme_cstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp, cstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) "6175 Fail IO, Prep DMA: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) "idx %d DID %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) lpfc_queue_info->index, ndlp->nlp_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) atomic_inc(&lport->xmt_fcp_err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) goto out_free_nvme_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) lpfc_ncmd->cur_iocbq.sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) lpfc_queue_info->index, ndlp->nlp_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) atomic_inc(&lport->xmt_fcp_wqerr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) "6113 Fail IO, Could not issue WQE err %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) "sid: x%x did: x%x oxid: x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) ret, vport->fc_myDID, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) lpfc_ncmd->cur_iocbq.sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) goto out_free_nvme_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (phba->cfg_xri_rebalancing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_ncmd->hdwq_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) if (lpfc_ncmd->ts_cmd_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) cpu = raw_smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) lpfc_ncmd->cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (idx != cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) lpfc_printf_vlog(vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) "6702 CPU Check cmd: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) "cpu %d wq %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) lpfc_ncmd->cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) lpfc_queue_info->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) out_free_nvme_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) if (lpfc_ncmd->nvmeCmd->sg_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) cstat->output_requests--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) cstat->input_requests--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) cstat->control_requests--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) lpfc_release_nvme_buf(phba, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) out_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) * @phba: Pointer to HBA context object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) * @cmdiocb: Pointer to command iocb object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) * @rspiocb: Pointer to response iocb object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * This is the callback function for any NVME FCP IO that was aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) struct lpfc_wcqe_complete *abts_cmpl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) "6145 ABORT_XRI_CN completing on rpi x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) "original iotag x%x, abort cmd iotag x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) "req_tag x%x, status x%x, hwstatus x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) cmdiocb->iocb.un.acxri.abortContextTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) cmdiocb->iocb.un.acxri.abortIoTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) cmdiocb->iotag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) bf_get(lpfc_wcqe_c_status, abts_cmpl),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) lpfc_sli_release_iocbq(phba, cmdiocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * @lpfc_pnvme: Pointer to the driver's nvme instance data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * @lpfc_nvme_lport: Pointer to the driver's local port data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_ereq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * Driver registers this routine as its nvme request io abort handler. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * data structure to the rport indicated in @lpfc_nvme_rport. This routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * is executed asynchronously - one the target is validated as "MAPPED" and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) * ready for IO, the driver issues the abort request and returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) struct nvme_fc_remote_port *pnvme_rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) void *hw_queue_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) struct nvmefc_fcp_req *pnvme_fcreq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) struct lpfc_io_buf *lpfc_nbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) struct lpfc_iocbq *abts_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) struct lpfc_iocbq *nvmereq_wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) struct lpfc_nvme_fcpreq_priv *freqpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) int ret_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) /* Validate pointers. LLDD fault handling with transport does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * have timing races.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) if (unlikely(!lport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) vport = lport->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) if (unlikely(!hw_queue_handle)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) "6129 Fail Abort, HW Queue Handle NULL.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) freqpriv = pnvme_fcreq->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (unlikely(!freqpriv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) if (vport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) /* Announce entry to new IO submit field. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) "6002 Abort Request to rport DID x%06x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) "for nvme_fc_req x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) pnvme_rport->port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) pnvme_fcreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) /* If the hba is getting reset, this flag is set. It is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * cleared when the reset is complete and rings reestablished.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) spin_lock_irqsave(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) /* driver queued commands are in process of being flushed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (phba->hba_flag & HBA_IOQ_FLUSH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) "6139 Driver in reset cleanup - flushing "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) "NVME Req now. hba_flag x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) phba->hba_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) lpfc_nbuf = freqpriv->nvme_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) if (!lpfc_nbuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) "6140 NVME IO req has no matching lpfc nvme "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) "io buffer. Skipping abort req.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) } else if (!lpfc_nbuf->nvmeCmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) "6141 lpfc NVME IO req has no nvme_fcreq "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) "io buffer. Skipping abort req.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /* Guard against IO completion being called at same time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) spin_lock(&lpfc_nbuf->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * state must match the nvme_fcreq passed by the nvme
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) * transport. If they don't match, it is likely the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) * has already completed the NVME IO and the nvme transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) * has not seen it yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) "6143 NVME req mismatch: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) "lpfc_nbuf x%px nvmeCmd x%px, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) "pnvme_fcreq x%px. Skipping Abort xri x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) lpfc_nbuf, lpfc_nbuf->nvmeCmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) pnvme_fcreq, nvmereq_wqe->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) /* Don't abort IOs no longer on the pending queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) "6142 NVME IO req x%px not queued - skipping "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) "abort req xri x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) pnvme_fcreq, nvmereq_wqe->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) atomic_inc(&lport->xmt_fcp_abort);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) nvmereq_wqe->sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) /* Outstanding abort is in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) "6144 Outstanding NVME I/O Abort Request "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) "still pending on nvme_fcreq x%px, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) "lpfc_ncmd %px xri x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) pnvme_fcreq, lpfc_nbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) nvmereq_wqe->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) abts_buf = __lpfc_sli_get_iocbq(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) if (!abts_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) "6136 No available abort wqes. Skipping "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) "Abts req for nvme_fcreq x%px xri x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) pnvme_fcreq, nvmereq_wqe->sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) /* Ready - mark outstanding as aborted by driver. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) /* ABTS WQE must go to the same WQ as the WQE to be aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) abts_buf->iocb_flag |= LPFC_IO_NVME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) abts_buf->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) spin_unlock(&lpfc_nbuf->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (ret_val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) "6137 Failed abts issue_wqe with status x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) "for nvme_fcreq x%px.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) ret_val, pnvme_fcreq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) lpfc_sli_release_iocbq(phba, abts_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) "6138 Transport Abort NVME Request Issued for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) "ox_id x%x on reqtag x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) nvmereq_wqe->sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) abts_buf->iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) spin_unlock(&lpfc_nbuf->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) /* Declare and initialization an instance of the FC NVME template. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) static struct nvme_fc_port_template lpfc_nvme_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) /* initiator-based functions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) .localport_delete = lpfc_nvme_localport_delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) .remoteport_delete = lpfc_nvme_remoteport_delete,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) .create_queue = lpfc_nvme_create_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) .delete_queue = lpfc_nvme_delete_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) .ls_req = lpfc_nvme_ls_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) .fcp_io = lpfc_nvme_fcp_io_submit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) .ls_abort = lpfc_nvme_ls_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) .fcp_abort = lpfc_nvme_fcp_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) .xmt_ls_rsp = lpfc_nvme_xmt_ls_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) .max_hw_queues = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) .dma_boundary = 0xFFFFFFFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) /* Sizes of additional private data for data structures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) * No use for the last two sizes at this time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) .local_priv_sz = sizeof(struct lpfc_nvme_lport),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) .lsrqst_priv_sz = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) .fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) * lpfc_get_nvme_buf - Get a nvme buffer from io_buf_list of the HBA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * @phba: The HBA for which this call is being executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * This routine removes a nvme buffer from head of @hdwq io_buf_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) * and returns to caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * Return codes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) * NULL - Error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * Pointer to lpfc_nvme_buf - Success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) static struct lpfc_io_buf *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) int idx, int expedite)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) struct lpfc_io_buf *lpfc_ncmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) struct lpfc_sli4_hdw_queue *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) struct sli4_sge *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) struct lpfc_iocbq *pwqeq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) union lpfc_wqe128 *wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) lpfc_ncmd = lpfc_get_io_buf(phba, NULL, idx, expedite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (lpfc_ncmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) pwqeq = &(lpfc_ncmd->cur_iocbq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) wqe = &pwqeq->wqe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) /* Setup key fields in buffer that may have been changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) * if other protocols used this buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) pwqeq->iocb_flag = LPFC_IO_NVME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) lpfc_ncmd->start_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) lpfc_ncmd->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) /* Rsp SGE will be filled in when we rcv an IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) * from the NVME Layer to be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) * The cmd is going to be embedded so we need a SKIP SGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) sgl = lpfc_ncmd->dma_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) bf_set(lpfc_sli4_sge_last, sgl, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) sgl->word2 = cpu_to_le32(sgl->word2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) /* Fill in word 3 / sgl_len during cmd submission */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) /* Initialize 64 bytes only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) memset(wqe, 0, sizeof(union lpfc_wqe));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) atomic_inc(&ndlp->cmd_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) lpfc_ncmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) qp = &phba->sli4_hba.hdwq[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) qp->empty_io_bufs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) return lpfc_ncmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) * lpfc_release_nvme_buf: Return a nvme buffer back to hba nvme buf list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) * @phba: The Hba for which this call is being executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) * @lpfc_ncmd: The nvme buffer which is being released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) * This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) * lpfc_io_buf_list list. For SLI4 XRI's are tied to the nvme buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) * and cannot be reused for at least RA_TOV amount of time if it was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) * aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) struct lpfc_sli4_hdw_queue *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) unsigned long iflag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) if ((lpfc_ncmd->flags & LPFC_SBUF_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) lpfc_ncmd->ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) qp = lpfc_ncmd->hdwq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) "6310 XB release deferred for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) "ox_id x%x on reqtag x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) lpfc_ncmd->cur_iocbq.sli4_xritag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) lpfc_ncmd->cur_iocbq.iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) list_add_tail(&lpfc_ncmd->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) &qp->lpfc_abts_io_buf_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) qp->abts_nvme_io_bufs++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) * @pvport - the lpfc_vport instance requesting a localport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) * This routine is invoked to create an nvme localport instance to bind
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) * to the nvme_fc_transport. It is called once during driver load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) * like lpfc_create_shost after all other services are initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * It requires a vport, vpi, and wwns at call time. Other localport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * parameters are modified as the driver's FCID and the Fabric WWN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * are established.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * Return codes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * 0 - successful
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) * -ENOMEM - no heap memory available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) * other values - from nvme registration upcall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) lpfc_nvme_create_localport(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) struct nvme_fc_port_info nfcp_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct nvme_fc_local_port *localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) /* Initialize this localport instance. The vport wwn usage ensures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) * that NPIV is accounted for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) /* We need to tell the transport layer + 1 because it takes page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) * alignment into account. When space for the SGL is allocated we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) * allocate + 3, one for cmd, one for rsp and one for this alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) /* Advertise how many hw queues we support based on cfg_hdw_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) * which will not exceed cpu count.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (!IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /* localport is allocated from the stack, but the registration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * call allocates heap memory as well as the private area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) &vport->phba->pcidev->dev, &localport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) "6005 Successfully registered local "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) "NVME port num %d, localP x%px, private "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) "x%px, sg_seg %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) localport->port_num, localport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) localport->private,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) lpfc_nvme_template.max_sgl_segments);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) /* Private is our lport size declared in the template. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) lport = (struct lpfc_nvme_lport *)localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) vport->localport = localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) lport->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) vport->nvmei_support = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) atomic_set(&lport->xmt_fcp_noxri, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) atomic_set(&lport->xmt_fcp_bad_ndlp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) atomic_set(&lport->xmt_fcp_qdepth, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) atomic_set(&lport->xmt_fcp_err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) atomic_set(&lport->xmt_fcp_wqerr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) atomic_set(&lport->xmt_fcp_abort, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) atomic_set(&lport->xmt_ls_abort, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) atomic_set(&lport->xmt_ls_err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) atomic_set(&lport->cmpl_fcp_xb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) atomic_set(&lport->cmpl_fcp_err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) atomic_set(&lport->cmpl_ls_xb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) atomic_set(&lport->cmpl_ls_err, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) atomic_set(&lport->fc4NvmeLsRequests, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) atomic_set(&lport->fc4NvmeLsCmpls, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) #if (IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) /* lpfc_nvme_lport_unreg_wait - Wait for the host to complete an lport unreg.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) * The driver has to wait for the host nvme transport to callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * indicating the localport has successfully unregistered all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * resources. Since this is an uninterruptible wait, loop every ten
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) * seconds and print a message indicating no progress.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) * An uninterruptible wait is used because of the risk of transport-to-
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * driver state mismatch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) struct lpfc_nvme_lport *lport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) struct completion *lport_unreg_cmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) u32 wait_tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) int ret, i, pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) struct lpfc_sli4_hdw_queue *qp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) int abts_scsi, abts_nvme;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) /* Host transport has to clean up and confirm requiring an indefinite
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * wait. Print a message if a 10 second wait expires and renew the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) * wait. This is unexpected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (unlikely(!ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) abts_scsi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) abts_nvme = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) for (i = 0; i < phba->cfg_hdw_queue; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) qp = &phba->sli4_hba.hdwq[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) pring = qp->io_wq->pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) if (!pring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) pending += pring->txcmplq_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) abts_scsi += qp->abts_scsi_io_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) abts_nvme += qp->abts_nvme_io_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) "6176 Lport x%px Localport x%px wait "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) "timed out. Pending %d [%d:%d]. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) "Renewing.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) lport, vport->localport, pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) abts_scsi, abts_nvme);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) "6177 Lport x%px Localport x%px Complete Success\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) lport, vport->localport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) * @pnvme: pointer to lpfc nvme data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) * This routine is invoked to destroy all lports bound to the phba.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) * The lport memory was allocated by the nvme fc transport and is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) * released there. This routine ensures all rports bound to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) * lport have been disconnected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) #if (IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) struct nvme_fc_local_port *localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) if (vport->nvmei_support == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) localport = vport->localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) lport = (struct lpfc_nvme_lport *)localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) "6011 Destroying NVME localport x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) localport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) /* lport's rport list is clear. Unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) * lport and release resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) lport->lport_unreg_cmp = &lport_unreg_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) ret = nvme_fc_unregister_localport(localport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) /* Wait for completion. This either blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) * indefinitely or succeeds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) vport->localport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) /* Regardless of the unregister upcall response, clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) * nvmei_support. All rports are unregistered and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) * driver will clean up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) vport->nvmei_support = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) if (ret == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) lpfc_printf_vlog(vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) "6009 Unregistered lport Success\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) lpfc_printf_vlog(vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) "6010 Unregistered lport "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) "Failed, status x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) lpfc_nvme_update_localport(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) #if (IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) struct nvme_fc_local_port *localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) localport = vport->localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) if (!localport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) "6710 Update NVME fail. No localport\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) lport = (struct lpfc_nvme_lport *)localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) if (!lport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) "6171 Update NVME fail. localP x%px, No lport\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) localport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) "6012 Update NVME lport x%px did x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) localport, vport->fc_myDID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) localport->port_id = vport->fc_myDID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) if (localport->port_id == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) "6030 bound lport x%px to DID x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) lport, localport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) #if (IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) struct nvme_fc_local_port *localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) struct lpfc_nvme_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) struct lpfc_nvme_rport *oldrport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) struct nvme_fc_remote_port *remote_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) struct nvme_fc_port_info rpinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) struct lpfc_nodelist *prev_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) struct fc_rport *srport = ndlp->rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) ndlp->nlp_DID, ndlp->nlp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) localport = vport->localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) if (!localport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) lport = (struct lpfc_nvme_lport *)localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) /* NVME rports are not preserved across devloss.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) * Just register this instance. Note, rpinfo->dev_loss_tmo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) * is left 0 to indicate accept transport defaults. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) * driver communicates port role capabilities consistent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) * with the PRLI response data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) rpinfo.port_id = ndlp->nlp_DID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) if (ndlp->nlp_type & NLP_NVME_TARGET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) if (ndlp->nlp_type & NLP_NVME_INITIATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (srport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) rpinfo.dev_loss_tmo = srport->dev_loss_tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) rpinfo.dev_loss_tmo = vport->cfg_devloss_tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) spin_lock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) oldrport = lpfc_ndlp_get_nrport(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) if (oldrport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) prev_ndlp = oldrport->ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) lpfc_nlp_get(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) /* If the ndlp already has an nrport, this is just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) * a resume of the existing rport. Else this is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) * new rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) /* Guard against an unregister/reregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) * race that leaves the WAIT flag set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) spin_lock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) rport = remote_port->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) if (oldrport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) /* Sever the ndlp<->rport association
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) * before dropping the ndlp ref from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) * register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) spin_lock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) ndlp->nrport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) rport->ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) rport->remoteport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) /* Reference only removed if previous NDLP is no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) * active. It might be just a swap and removing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) * reference would cause a premature cleanup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) if (prev_ndlp && prev_ndlp != ndlp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) if ((!NLP_CHK_NODE_ACT(prev_ndlp)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) (!prev_ndlp->nrport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) lpfc_nlp_put(prev_ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) /* Clean bind the rport to the ndlp. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) rport->remoteport = remote_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) rport->lport = lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) rport->ndlp = ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) spin_lock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) ndlp->nrport = rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) lpfc_printf_vlog(vport, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) LOG_NVME_DISC | LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) "6022 Bind lport x%px to remoteport x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) "rport x%px WWNN 0x%llx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) "Rport WWPN 0x%llx DID "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) "x%06x Role x%x, ndlp %p prev_ndlp x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) lport, remote_port, rport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) rpinfo.node_name, rpinfo.port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) rpinfo.port_id, rpinfo.port_role,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) ndlp, prev_ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) "6031 RemotePort Registration failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) "err: %d, DID x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) ret, ndlp->nlp_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) * lpfc_nvme_rescan_port - Check to see if we should rescan this remoteport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * If the ndlp represents an NVME Target, that we are logged into,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) * ping the NVME FC Transport layer to initiate a device rescan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) * on this remote NPort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) #if (IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) struct lpfc_nvme_rport *nrport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) struct nvme_fc_remote_port *remoteport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) spin_lock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) nrport = lpfc_ndlp_get_nrport(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) if (nrport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) remoteport = nrport->remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) "6170 Rescan NPort DID x%06x type x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) "state x%x nrport x%px remoteport x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) nrport, remoteport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) if (!nrport || !remoteport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) goto rescan_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) /* Only rescan if we are an NVME target in the MAPPED state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) nvme_fc_rescan_remoteport(remoteport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) "6172 NVME rescanned DID x%06x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) "port_state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) ndlp->nlp_DID, remoteport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) rescan_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) "6169 Skip NVME Rport Rescan, NVME remoteport "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) "unregistered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) * There is no notion of Devloss or rport recovery from the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) * nvme_transport perspective. Loss of an rport just means IO cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) * be sent and recovery is completely up to the initator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) * For now, the driver just unbinds the DID and port_role so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) * no further IO can be issued. Changes are planned for later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) * Notes - the ndlp reference count is not decremented here since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) * since there is no nvme_transport api for devloss. Node ref count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) * is only adjusted in driver unload.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) #if (IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) struct nvme_fc_local_port *localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) struct lpfc_nvme_lport *lport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) struct lpfc_nvme_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) struct nvme_fc_remote_port *remoteport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) localport = vport->localport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) /* This is fundamental error. The localport is always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) * available until driver unload. Just exit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (!localport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) lport = (struct lpfc_nvme_lport *)localport->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) if (!lport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) goto input_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) spin_lock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) rport = lpfc_ndlp_get_nrport(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) if (rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) remoteport = rport->remoteport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) spin_unlock_irq(&vport->phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) if (!remoteport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) goto input_err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) "6033 Unreg nvme remoteport x%px, portname x%llx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) "port_id x%06x, portstate x%x port type x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) remoteport, remoteport->port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) remoteport->port_id, remoteport->port_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) ndlp->nlp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) /* Sanity check ndlp type. Only call for NVME ports. Don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) * clear any rport state until the transport calls back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) if (ndlp->nlp_type & NLP_NVME_TARGET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) /* No concern about the role change on the nvme remoteport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * The transport will update it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) ndlp->upcall_flags |= NLP_WAIT_FOR_UNREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) /* Don't let the host nvme transport keep sending keep-alives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * on this remoteport. Vport is unloading, no recovery. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) * return values is ignored. The upcall is a courtesy to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) * transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) if (vport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) (void)nvme_fc_set_remoteport_devloss(remoteport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) ret = nvme_fc_unregister_remoteport(remoteport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) if (ret != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) "6167 NVME unregister failed %d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) "port_state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) ret, remoteport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) input_err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) "6168 State error: lport x%px, rport x%px FCID x%06x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) vport->localport, ndlp->rport, ndlp->nlp_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * @axri: pointer to the fcp xri abort wcqe structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) * @lpfc_ncmd: The nvme job structure for the request being aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) * This routine is invoked by the worker thread to process a SLI4 fast-path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) * NVME aborted xri. Aborted NVME IO commands are completed to the transport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) * here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) struct sli4_wcqe_xri_aborted *axri,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) struct lpfc_io_buf *lpfc_ncmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) struct nvmefc_fcp_req *nvme_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) lpfc_sli4_abts_err_handler(phba, ndlp, axri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) "6311 nvme_cmd %p xri x%x tag x%x abort complete and "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) "xri released\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) lpfc_ncmd->nvmeCmd, xri,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) lpfc_ncmd->cur_iocbq.iotag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) /* Aborted NVME commands are required to not complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) * before the abort exchange command fully completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) * Once completed, it is available via the put list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (lpfc_ncmd->nvmeCmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) nvme_cmd = lpfc_ncmd->nvmeCmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) nvme_cmd->done(nvme_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) lpfc_ncmd->nvmeCmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) lpfc_release_nvme_buf(phba, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) * lpfc_nvme_wait_for_io_drain - Wait for all NVME wqes to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) * @phba: Pointer to HBA context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) * This function flushes all wqes in the nvme rings and frees all resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) * in the txcmplq. This function does not issue abort wqes for the IO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) * commands in txcmplq, they will just be returned with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) * slot has been permanently disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) u32 i, wait_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) /* Cycle through all IO rings and make sure all outstanding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) * WQEs have been removed from the txcmplqs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) for (i = 0; i < phba->cfg_hdw_queue; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) if (!phba->sli4_hba.hdwq[i].io_wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) pring = phba->sli4_hba.hdwq[i].io_wq->pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) if (!pring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) /* Retrieve everything on the txcmplq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) while (!list_empty(&pring->txcmplq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) wait_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) /* The sleep is 10mS. Every ten seconds,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) * dump a message. Something is wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) if ((wait_cnt % 1000) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) "6178 NVME IO not empty, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) "cnt %d\n", wait_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) #if (IS_ENABLED(CONFIG_NVME_FC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) struct lpfc_io_buf *lpfc_ncmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) struct nvmefc_fcp_req *nCmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) struct lpfc_nvme_fcpreq_priv *freqpriv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) if (!pwqeIn->context1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) lpfc_sli_release_iocbq(phba, pwqeIn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) /* For abort iocb just return, IO iocb will do a done call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) CMD_ABORT_XRI_CX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) lpfc_sli_release_iocbq(phba, pwqeIn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) spin_lock(&lpfc_ncmd->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) if (!lpfc_ncmd->nvmeCmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) spin_unlock(&lpfc_ncmd->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) lpfc_release_nvme_buf(phba, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) nCmd = lpfc_ncmd->nvmeCmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) "6194 NVME Cancel xri %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) lpfc_ncmd->cur_iocbq.sli4_xritag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) nCmd->transferred_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) nCmd->rcv_rsplen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) nCmd->status = NVME_SC_INTERNAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) freqpriv = nCmd->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) freqpriv->nvme_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) lpfc_ncmd->nvmeCmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) spin_unlock(&lpfc_ncmd->buf_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) nCmd->done(nCmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) /* Call release with XB=1 to queue the IO into the abort list. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) lpfc_release_nvme_buf(phba, lpfc_ncmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) }