Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2020 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/lockdep.h>
#include <linux/utsname.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* AlpaArray for assignment of scsid for scan-down and bind_method */
static uint8_t lpfcAlpaArray[] = {
	0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
	0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
	0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
	0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
	0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
	0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
	0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
	0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
};

static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);

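/*
 * lpfc_terminate_rport_io - Abort outstanding FCP I/O for a remote port.
 * @rport: FC transport remote port whose I/O is being terminated.
 *
 * Descriptive comment added for readability: looks up the node bound to
 * @rport and, if the node has a valid SCSI ID, aborts all outstanding
 * FCP ring I/O addressed to that target.
 */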
void
lpfc_terminate_rport_io(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_hba *phba;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
			printk(KERN_ERR "Cannot find remote node"
			" to terminate I/O Data x%x\n",
			rport->port_id);
		return;
	}

	phba  = ndlp->phba;

	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
		"rport terminate: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	if (ndlp->nlp_sid != NLP_NO_SID) {
		lpfc_sli_abort_iocb(ndlp->vport,
			&phba->sli.sli3_ring[LPFC_FCP_RING],
			ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}
}

/*
 * This function will be called when dev_loss_tmo fires.
 */
void
lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct lpfc_rport_data *rdata;
	struct lpfc_nodelist *ndlp;
	struct lpfc_vport *vport;
	struct Scsi_Host *shost;
	struct lpfc_hba   *phba;
	struct lpfc_work_evt *evtp;
	int  put_node;
	int  put_rport;
	unsigned long iflags;

	rdata = rport->dd_data;
	ndlp = rdata->pnode;
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return;

	vport = ndlp->vport;
	phba  = vport->phba;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosscb: sid:x%x did:x%x flg:x%x",
		ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		put_node = rdata->pnode != NULL;
		put_rport = ndlp->rport != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		if (put_rport)
			put_device(&rport->dev);
		return;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
		return;

	if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn))
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6789 rport name %llx != node port name %llx",
				 rport->port_name,
				 wwn_to_u64(ndlp->nlp_portname.u.wwn));

	evtp = &ndlp->dev_loss_evt;

	if (!list_empty(&evtp->evt_listp)) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "6790 rport name %llx dev_loss_evt pending",
				 rport->port_name);
		return;
	}

	shost = lpfc_shost_from_vport(vport);
	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp->nlp_flag |= NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	/* We need to hold the node by incrementing the reference
	 * count until this queued work is done
	 */
	evtp->evt_arg1  = lpfc_nlp_get(ndlp);

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (evtp->evt_arg1) {
		evtp->evt = LPFC_EVT_DEV_LOSS;
		list_add_tail(&evtp->evt_listp, &phba->work_list);
		lpfc_worker_wake_up(phba);
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	return;
}

/**
 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
 * @ndlp: Pointer to remote node object.
 *
 * This function is called from the worker thread when the devloss timeout
 * timer expires. For an SLI4 host, this routine shall return 1 when at least
 * one remote node, including this @ndlp, is still in use of the FCF;
 * otherwise, it shall return 0 when no remote node is still in use of the
 * FCF when the devloss timeout happened to this @ndlp.
 **/
static int
lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
{
	struct lpfc_rport_data *rdata;
	struct fc_rport   *rport;
	struct lpfc_vport *vport;
	struct lpfc_hba   *phba;
	struct Scsi_Host  *shost;
	uint8_t *name;
	int  put_node;
	int warn_on = 0;
	int fcf_inuse = 0;
	unsigned long iflags;

	rport = ndlp->rport;
	vport = ndlp->vport;
	shost = lpfc_shost_from_vport(vport);

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS;
	spin_unlock_irqrestore(shost->host_lock, iflags);

	if (!rport)
		return fcf_inuse;

	name = (uint8_t *) &ndlp->nlp_portname;
	phba  = vport->phba;

	if (phba->sli_rev == LPFC_SLI_REV4)
		fcf_inuse = lpfc_fcf_inuse(phba);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
		"rport devlosstmo:did:x%x type:x%x id:x%x",
		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);

	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
			 "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n",
			 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);

	/*
	 * lpfc_nlp_remove, if reached with a dangling rport, drops the
	 * reference. To make sure that does not happen, clear the rport
	 * pointer in ndlp before lpfc_nlp_put.
	 */
	rdata = rport->dd_data;

	/* Don't defer this if we are in the process of deleting the vport
	 * or unloading the driver. The unload will clean up the node
	 * appropriately; we just need to clean up the ndlp rport info here.
	 */
	if (vport->load_flag & FC_UNLOADING) {
		if (ndlp->nlp_sid != NLP_NO_SID) {
			/* flush the target */
			lpfc_sli_abort_iocb(vport,
					    &phba->sli.sli3_ring[LPFC_FCP_RING],
					    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
		}
		put_node = rdata->pnode != NULL;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
		if (put_node)
			lpfc_nlp_put(ndlp);
		put_device(&rport->dev);

		return fcf_inuse;
	}

	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0284 Devloss timeout Ignored on "
				 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
				 "NPort x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID);
		return fcf_inuse;
	}

	put_node = rdata->pnode != NULL;
	rdata->pnode = NULL;
	ndlp->rport = NULL;
	if (put_node)
		lpfc_nlp_put(ndlp);
	put_device(&rport->dev);

	if (ndlp->nlp_type & NLP_FABRIC)
		return fcf_inuse;

	if (ndlp->nlp_sid != NLP_NO_SID) {
		warn_on = 1;
		lpfc_sli_abort_iocb(vport, &phba->sli.sli3_ring[LPFC_FCP_RING],
				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
	}

	if (warn_on) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0203 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	} else {
		lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT,
				 "0204 Devloss timeout on "
				 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
				 "NPort x%06x Data: x%x x%x x%x\n",
				 *name, *(name+1), *(name+2), *(name+3),
				 *(name+4), *(name+5), *(name+6), *(name+7),
				 ndlp->nlp_DID, ndlp->nlp_flag,
				 ndlp->nlp_state, ndlp->nlp_rpi);
	}

	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);

	return fcf_inuse;
}

/**
 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
 * @phba: Pointer to hba context object.
 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
 * @nlp_did: remote node identifier with devloss timeout.
 *
 * This function is called from the worker thread after invoking the devloss
 * timeout handler and releasing the reference count for the ndlp with
 * which the devloss timeout was handled for an SLI4 host. When this routine
 * is invoked for the devloss timeout of the last remote node that had been
 * using the FCF, it is guaranteed that no remote node is still using the
 * FCF. On the devloss timeout of that last remote node, if the FIP engine
 * is neither in the FCF table scan process nor in the roundrobin failover
 * process, the in-use FCF shall be unregistered. If the FIP engine is in
 * the FCF discovery process, the devloss timeout state shall be set for
 * either the FCF table scan process or the roundrobin failover process to
 * unregister the in-use FCF.
 **/
static void
lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
				    uint32_t nlp_did)
{
	/* If the devloss timeout happened to a remote node when the FCF was
	 * no longer in use, do nothing.
	 */
	if (!fcf_inuse)
		return;

	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
		spin_lock_irq(&phba->hbalock);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
				spin_unlock_irq(&phba->hbalock);
				return;
			}
			phba->hba_flag |= HBA_DEVLOSS_TMO;
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2847 Last remote node (x%x) using "
					"FCF devloss tmo\n", nlp_did);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2868 Devloss tmo to FCF rediscovery "
					"in progress\n");
			return;
		}
		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
			spin_unlock_irq(&phba->hbalock);
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2869 Devloss tmo to idle FIP engine, "
					"unreg in-use FCF and rescan.\n");
			/* Unregister in-use FCF and rescan */
			lpfc_unregister_fcf_rescan(phba);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2870 FCF table scan in progress\n");
		if (phba->hba_flag & FCF_RR_INPROG)
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
					"2871 FLOGI roundrobin FCF failover "
					"in progress\n");
	}
	lpfc_unregister_unused_fcf(phba);
}

/**
 * lpfc_alloc_fast_evt - Allocates data structure for posting an event
 * @phba: Pointer to hba context object.
 *
 * This function is called from functions which need to post events
 * from interrupt context. It allocates the data structure required
 * for posting an event. It also keeps track of the number of pending
 * events and prevents an event storm when there are too many events.
 **/
struct lpfc_fast_path_event *
lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
	struct lpfc_fast_path_event *ret;

	/* If there are a lot of fast events, do not exhaust memory due to this */
	if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
		return NULL;

	ret = kzalloc(sizeof(struct lpfc_fast_path_event),
			GFP_ATOMIC);
	if (ret) {
		atomic_inc(&phba->fast_event_count);
		INIT_LIST_HEAD(&ret->work_evt.evt_listp);
		ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	}
	return ret;
}

/**
 * lpfc_free_fast_evt - Frees event data structure
 * @phba: Pointer to hba context object.
 * @evt:  Event object which needs to be freed.
 *
 * This function frees the data structure required for posting
 * events.
 **/
void
lpfc_free_fast_evt(struct lpfc_hba *phba,
		struct lpfc_fast_path_event *evt) {

	atomic_dec(&phba->fast_event_count);
	kfree(evt);
}

/**
 * lpfc_send_fastpath_evt - Posts events generated from the fast path
 * @phba: Pointer to hba context object.
 * @evtp: Event data structure.
 *
 * This function is called from the worker thread when the interrupt
 * context needs to post an event. It posts the event to the fc
 * transport netlink interface.
 **/
static void
lpfc_send_fastpath_evt(struct lpfc_hba *phba,
		struct lpfc_work_evt *evtp)
{
	unsigned long evt_category, evt_sub_category;
	struct lpfc_fast_path_event *fast_evt_data;
	char *evt_data;
	uint32_t evt_data_size;
	struct Scsi_Host *shost;

	fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
		work_evt);

	evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
	evt_sub_category = (unsigned long) fast_evt_data->un.
			fabric_evt.subcategory;
	shost = lpfc_shost_from_vport(fast_evt_data->vport);
	if (evt_category == FC_REG_FABRIC_EVENT) {
		if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
			evt_data = (char *) &fast_evt_data->un.read_check_error;
			evt_data_size = sizeof(fast_evt_data->un.
				read_check_error);
		} else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
			(evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
			evt_data = (char *) &fast_evt_data->un.fabric_evt;
			evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
		} else {
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else if (evt_category == FC_REG_SCSI_EVENT) {
		switch (evt_sub_category) {
		case LPFC_EVENT_QFULL:
		case LPFC_EVENT_DEVBSY:
			evt_data = (char *) &fast_evt_data->un.scsi_evt;
			evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
			break;
		case LPFC_EVENT_CHECK_COND:
			evt_data = (char *) &fast_evt_data->un.check_cond_evt;
			evt_data_size =  sizeof(fast_evt_data->un.
				check_cond_evt);
			break;
		case LPFC_EVENT_VARQUEDEPTH:
			evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
			evt_data_size = sizeof(fast_evt_data->un.
				queue_depth_evt);
			break;
		default:
			lpfc_free_fast_evt(phba, fast_evt_data);
			return;
		}
	} else {
		lpfc_free_fast_evt(phba, fast_evt_data);
		return;
	}

	if (phba->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
		fc_host_post_vendor_event(shost,
			fc_get_event_number(),
			evt_data_size,
			evt_data,
			LPFC_NL_VENDOR_ID);

	lpfc_free_fast_evt(phba, fast_evt_data);
	return;
}

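/*
 * lpfc_work_list_done - Drain and dispatch the HBA worker event list.
 * @phba: Pointer to hba context object.
 *
 * Descriptive comment added for readability: removes each queued event
 * from phba->work_list under the hbalock and dispatches it to the matching
 * handler (ELS retry, devloss, port recovery, online/offline transitions,
 * fast-path events, HBA reset), freeing the event afterwards when it is
 * not embedded in another object.
 */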
static void
lpfc_work_list_done(struct lpfc_hba *phba)
{
	struct lpfc_work_evt  *evtp = NULL;
	struct lpfc_nodelist  *ndlp;
	int free_evt;
	int fcf_inuse;
	uint32_t nlp_did;

	spin_lock_irq(&phba->hbalock);
	while (!list_empty(&phba->work_list)) {
		list_remove_head((&phba->work_list), evtp, typeof(*evtp),
				 evt_listp);
		spin_unlock_irq(&phba->hbalock);
		free_evt = 1;
		switch (evtp->evt) {
		case LPFC_EVT_ELS_RETRY:
			ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
			lpfc_els_retry_delay_handler(ndlp);
			free_evt = 0; /* evt is part of ndlp */
			/* decrement the node reference count held
			 * for this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_DEV_LOSS:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			nlp_did = ndlp->nlp_DID;
			lpfc_nlp_put(ndlp);
			if (phba->sli_rev == LPFC_SLI_REV4)
				lpfc_sli4_post_dev_loss_tmo_handler(phba,
								    fcf_inuse,
								    nlp_did);
			break;
		case LPFC_EVT_RECOVER_PORT:
			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
			lpfc_sli_abts_recover_port(ndlp->vport, ndlp);
			free_evt = 0;
			/* decrement the node reference count held for
			 * this queued work
			 */
			lpfc_nlp_put(ndlp);
			break;
		case LPFC_EVT_ONLINE:
			if (phba->link_state < LPFC_LINK_DOWN)
				*(int *) (evtp->evt_arg1) = lpfc_online(phba);
			else
				*(int *) (evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE_PREP:
			if (phba->link_state >= LPFC_LINK_DOWN)
				lpfc_offline_prep(phba, LPFC_MBX_WAIT);
			*(int *)(evtp->evt_arg1) = 0;
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_OFFLINE:
			lpfc_offline(phba);
			lpfc_sli_brdrestart(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_WARM_START:
			lpfc_offline(phba);
			lpfc_reset_barrier(phba);
			lpfc_sli_brdreset(phba);
			lpfc_hba_down_post(phba);
			*(int *)(evtp->evt_arg1) =
				lpfc_sli_brdready(phba, HS_MBRDY);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_KILL:
			lpfc_offline(phba);
			*(int *)(evtp->evt_arg1)
				= (phba->pport->stopped)
				        ? 0 : lpfc_sli_brdkill(phba);
			lpfc_unblock_mgmt_io(phba);
			complete((struct completion *)(evtp->evt_arg2));
			break;
		case LPFC_EVT_FASTPATH_MGMT_EVT:
			lpfc_send_fastpath_evt(phba, evtp);
			free_evt = 0;
			break;
		case LPFC_EVT_RESET_HBA:
			if (!(phba->pport->load_flag & FC_UNLOADING))
				lpfc_reset_hba(phba);
			break;
		}
		if (free_evt)
			kfree(evtp);
		spin_lock_irq(&phba->hbalock);
	}
	spin_unlock_irq(&phba->hbalock);

}

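/*
 * lpfc_work_done - Process pending HBA and vport work.
 * @phba: Pointer to hba context object.
 *
 * Descriptive comment added for readability: handles latched host
 * attention events (error, mailbox, link), SLI4 asynchronous events,
 * per-vport timeout work, and slow-path ELS ring events, re-enabling
 * ring interrupts where needed, and finally drains the worker event
 * list via lpfc_work_list_done().
 */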
static void
lpfc_work_done(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	uint32_t ha_copy, status, control, work_port_events;
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	int i;

	spin_lock_irq(&phba->hbalock);
	ha_copy = phba->work_ha;
	phba->work_ha = 0;
	spin_unlock_irq(&phba->hbalock);

	/* First, try to post the next mailbox command to SLI4 device */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
		lpfc_sli4_post_async_mbox(phba);

	if (ha_copy & HA_ERATT) {
		/* Handle the error attention event */
		lpfc_handle_eratt(phba);

		if (phba->fw_dump_cmpl) {
			complete(phba->fw_dump_cmpl);
			phba->fw_dump_cmpl = NULL;
		}
	}

	if (ha_copy & HA_MBATT)
		lpfc_sli_handle_mb_event(phba);

	if (ha_copy & HA_LATT)
		lpfc_handle_latt(phba);

	/* Process SLI4 events */
	if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
		if (phba->hba_flag & HBA_RRQ_ACTIVE)
			lpfc_handle_rrq_active(phba);
		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
			lpfc_sli4_els_xri_abort_event_proc(phba);
		if (phba->hba_flag & ASYNC_EVENT)
			lpfc_sli4_async_event_proc(phba);
		if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
			spin_lock_irq(&phba->hbalock);
			phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
			spin_unlock_irq(&phba->hbalock);
			lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
		}
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
			lpfc_sli4_fcf_redisc_event_proc(phba);
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports; i++) {
			/*
			 * We could have no vports in array if unloading, so if
			 * this happens then just use the pport
			 */
			if (vports[i] == NULL && i == 0)
				vport = phba->pport;
			else
				vport = vports[i];
			if (vport == NULL)
				break;
			spin_lock_irq(&vport->work_port_lock);
			work_port_events = vport->work_port_events;
			vport->work_port_events &= ~work_port_events;
			spin_unlock_irq(&vport->work_port_lock);
			if (work_port_events & WORKER_DISC_TMO)
				lpfc_disc_timeout_handler(vport);
			if (work_port_events & WORKER_ELS_TMO)
				lpfc_els_timeout_handler(vport);
			if (work_port_events & WORKER_HB_TMO)
				lpfc_hb_timeout_handler(phba);
			if (work_port_events & WORKER_MBOX_TMO)
				lpfc_mbox_timeout_handler(phba);
			if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
				lpfc_unblock_fabric_iocbs(phba);
			if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
				lpfc_ramp_down_queue_handler(phba);
			if (work_port_events & WORKER_DELAYED_DISC_TMO)
				lpfc_delayed_disc_timeout_handler(vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	pring = lpfc_phba_elsring(phba);
	status = (ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
	status >>= (4*LPFC_ELS_RING);
	if (pring && (status & HA_RXMASK ||
		      pring->flag & LPFC_DEFERRED_RING_EVENT ||
		      phba->hba_flag & HBA_SP_QUEUE_EVT)) {
		if (pring->flag & LPFC_STOP_IOCB_EVENT) {
			pring->flag |= LPFC_DEFERRED_RING_EVENT;
			/* Preserve legacy behavior. */
			if (!(phba->hba_flag & HBA_SP_QUEUE_EVT))
				set_bit(LPFC_DATA_READY, &phba->data_flags);
		} else {
			/* Driver could have abort request completed in queue
			 * when link goes down.  Allow for this transition.
			 */
			if (phba->link_state >= LPFC_LINK_DOWN ||
			    phba->link_flag & LS_MDS_LOOPBACK) {
				pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
				lpfc_sli_handle_slow_ring_event(phba, pring,
								(status &
								HA_RXMASK));
			}
		}
		if (phba->sli_rev == LPFC_SLI_REV4)
			lpfc_drain_txq(phba);
		/*
		 * Turn on Ring interrupts
		 */
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			spin_lock_irq(&phba->hbalock);
			control = readl(phba->HCregaddr);
			if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Enable ring: cntl:x%x hacopy:x%x",
					control, ha_copy, 0);

				control |= (HC_R0INT_ENA << LPFC_ELS_RING);
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
			} else {
				lpfc_debugfs_slow_ring_trc(phba,
					"WRK Ring ok:     cntl:x%x hacopy:x%x",
					control, ha_copy, 0);
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}
	lpfc_work_list_done(phba);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) lpfc_do_work(void *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	struct lpfc_hba *phba = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 
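	/* Run with the most favorable nice value and opt out of the freezer
	 * so worker/discovery processing is not stalled across suspend.
	 */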
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	set_user_nice(current, MIN_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	current->flags |= PF_NOFREEZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	phba->data_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	while (!kthread_should_stop()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		/* wait and check worker queue activities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		rc = wait_event_interruptible(phba->work_waitq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 					(test_and_clear_bit(LPFC_DATA_READY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 							    &phba->data_flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 					 || kthread_should_stop()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		/* A wakeup by signal terminates the worker thread */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 					"0433 Wakeup on signal: rc=x%x\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		/* Attend to pending lpfc data processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		lpfc_work_done(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	phba->worker_thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			"0432 Worker thread stopped.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789)  * This is only called to handle FC worker events. Since this is a rare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790)  * occurrence, we allocate a struct lpfc_work_evt structure here instead of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791)  * embedding it in the IOCB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  */
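/*
 * Illustrative usage sketch (assumed, not taken from this file): a caller
 * that wants the worker thread to handle an event later would do something
 * like
 *
 *	if (!lpfc_workq_post_event(phba, ndlp, NULL, LPFC_EVT_ELS_RETRY))
 *		lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
 *				"worker event dropped: out of memory\n");
 *
 * The event type and arguments above only illustrate the calling convention;
 * real callers pass whatever arg1/arg2 the matching case in
 * lpfc_work_list_done() expects.
 */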
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		      uint32_t evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	struct lpfc_work_evt  *evtp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	 * be queued to the worker thread for processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 */
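	/* GFP_ATOMIC: events can be posted from completion/interrupt context,
	 * so sleeping is not allowed here.  If the allocation fails the event
	 * is silently dropped and 0 is returned to the caller.
	 */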
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	if (!evtp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	evtp->evt_arg1  = arg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	evtp->evt_arg2  = arg2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	evtp->evt       = evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	spin_lock_irqsave(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	list_add_tail(&evtp->evt_listp, &phba->work_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	spin_unlock_irqrestore(&phba->hbalock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	lpfc_worker_wake_up(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	struct lpfc_nodelist *ndlp, *next_ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		if (!NLP_CHK_NODE_ACT(ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			((vport->port_type == LPFC_NPIV_PORT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			(ndlp->nlp_DID == NameServer_DID)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 			lpfc_unreg_rpi(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		/* Leave Fabric nodes alone on link down */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		if ((phba->sli_rev < LPFC_SLI_REV4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		    (!remove && ndlp->nlp_type & NLP_FABRIC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		/* Notify transport of connectivity loss to trigger cleanup. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		if (phba->nvmet_support &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 			lpfc_nvmet_invalidate_host(phba, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		lpfc_disc_state_machine(vport, ndlp, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 					remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 					? NLP_EVT_DEVICE_RM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 					: NLP_EVT_DEVICE_RECOVERY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		if (phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			lpfc_sli4_unreg_all_rpis(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 		lpfc_mbx_unreg_vpi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) lpfc_port_link_failure(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	/* Cleanup any outstanding received buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	lpfc_cleanup_rcv_buffers(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	/* Cleanup any outstanding RSCN activity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	lpfc_els_flush_rscn(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	/* Cleanup any outstanding ELS commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	lpfc_els_flush_cmd(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	lpfc_cleanup_rpis(vport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	/* Turn off discovery timer if it's running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	lpfc_can_disctmo(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) lpfc_linkdown_port(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		fc_host_post_event(shost, fc_get_event_number(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 				   FCH_EVT_LINKDOWN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		"Link Down:       state:x%x rtry:x%x flg:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	lpfc_port_link_failure(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	/* Stop delayed Nport discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	vport->fc_flag &= ~FC_DISC_DELAYED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	del_timer_sync(&vport->delayed_disc_tmo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) lpfc_linkdown(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	struct lpfc_vport *vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct lpfc_vport **vports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	LPFC_MBOXQ_t          *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	if (phba->link_state == LPFC_LINK_DOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	/* Block all SCSI stack I/Os */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	lpfc_scsi_dev_block(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	phba->defer_flogi_acc_flag = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
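	/* The previously discovered FCF is no longer trusted once the link
	 * drops; clearing these flags forces a fresh FCF table scan on the
	 * next link up.
	 */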
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	if (phba->link_state > LPFC_LINK_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		phba->link_state = LPFC_LINK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		if (phba->sli4_hba.conf_trunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 			phba->trunk_link.link0.state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			phba->trunk_link.link1.state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			phba->trunk_link.link2.state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			phba->trunk_link.link3.state = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			phba->sli4_hba.link_state.logical_speed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 						LPFC_LINK_SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		phba->pport->fc_flag &= ~FC_LBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	vports = lpfc_create_vport_work_array(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	if (vports != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 			/* Issue a LINK DOWN event to all nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			lpfc_linkdown_port(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			vports[i]->fc_myDID = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 				if (phba->nvmet_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 					lpfc_nvmet_update_targetport(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 					lpfc_nvme_update_localport(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	lpfc_destroy_vport_work_array(phba, vports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	/* Clean up any SLI3 firmware default rpi's */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (phba->sli_rev > LPFC_SLI_REV3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		goto skip_unreg_did;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
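	/* The mailbox is freed here only if the submit fails; on successful
	 * submission lpfc_sli_def_mbox_cmpl releases it at completion time.
	 */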
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (mb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		mb->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		    == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			mempool_free(mb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  skip_unreg_did:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	/* Set up myDID for link up if we are in pt2pt mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	if (phba->pport->fc_flag & FC_PT2PT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		if (mb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 			lpfc_config_link(phba, mb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			mb->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			    == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 				mempool_free(mb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		phba->pport->rcv_flogi_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		if (!NLP_CHK_NODE_ACT(ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		if (ndlp->nlp_type & NLP_FABRIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			/* On Linkup it's safe to clean up the ndlp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 			 * from Fabric connections.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			if (ndlp->nlp_DID != Fabric_DID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 				lpfc_unreg_rpi(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			/* Fail outstanding IO now since device is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			 * marked for PLOGI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			lpfc_unreg_rpi(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) lpfc_linkup_port(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	if ((vport->load_flag & FC_UNLOADING) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		"Link Up:         top:x%x speed:x%x flg:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/* If NPIV is not enabled, only bring the physical port up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		(vport != phba->pport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		fc_host_post_event(shost, fc_get_event_number(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 				   FCH_EVT_LINKUP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
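	/* Start this port's discovery state from scratch: clear pt2pt and
	 * RSCN bookkeeping left over from the previous link and mark NPort
	 * discovery as active.
	 */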
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	vport->fc_flag |= FC_NDISC_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	vport->fc_ns_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (vport->fc_flag & FC_LBIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		lpfc_linkup_cleanup_nodes(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) lpfc_linkup(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct lpfc_vport **vports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	struct Scsi_Host  *shost = lpfc_shost_from_vport(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	phba->link_state = LPFC_LINK_UP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	/* Unblock fabric iocbs if they are blocked */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	del_timer_sync(&phba->fabric_block_timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	vports = lpfc_create_vport_work_array(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	if (vports != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			lpfc_linkup_port(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	lpfc_destroy_vport_work_array(phba, vports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	/* Clear the pport flogi counter in case the link down was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	 * absorbed without an ACQE. No lock here - in worker thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	 * and discovery is synchronized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	phba->pport->rcv_flogi_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	/* reinitialize initial FLOGI flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	phba->hba_flag &= ~(HBA_FLOGI_ISSUED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	phba->defer_flogi_acc_flag = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)  * This routine handles processing a CLEAR_LA mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)  * command upon completion. It is set up in the LPFC_MBOXQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)  * as the completion routine when the command is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)  * handed off to the SLI layer. SLI3 only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	struct lpfc_vport *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	struct lpfc_sli   *psli = &phba->sli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	MAILBOX_t *mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	uint32_t control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	/* Since we don't do discovery right now, turn these off here */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	psli->sli3_ring[LPFC_EXTRA_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	psli->sli3_ring[LPFC_FCP_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	/* Check for error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 				 "0320 CLEAR_LA mbxStatus error x%x hba "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 				 "state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 				 mb->mbxStatus, vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		phba->link_state = LPFC_HBA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	if (vport->port_type == LPFC_PHYSICAL_PORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		phba->link_state = LPFC_HBA_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	psli->sli_flag |= LPFC_PROCESS_LA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	control = readl(phba->HCregaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	control |= HC_LAINT_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	writel(control, phba->HCregaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	readl(phba->HCregaddr); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	/* Device Discovery completes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			 "0225 Device Discovery completes\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	lpfc_can_disctmo(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	/* turn on Link Attention interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	psli->sli_flag |= LPFC_PROCESS_LA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	control = readl(phba->HCregaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	control |= HC_LAINT_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	writel(control, phba->HCregaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	readl(phba->HCregaddr); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	struct lpfc_vport *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	LPFC_MBOXQ_t *sparam_mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	struct lpfc_dmabuf *sparam_mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	u16 status = pmb->u.mb.mbxStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	/* don't perform discovery for SLI4 loopback diagnostic test */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	    !(phba->hba_flag & HBA_FCOE_MODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	    (phba->link_flag & LS_LOOPBACK_MODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	    vport->fc_flag & FC_PUBLIC_LOOP &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	    !(vport->fc_flag & FC_LBIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		/* Need to wait for FAN - use discovery timer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		 * for timeout.  port_state is identically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		lpfc_set_disctmo(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	/* Start discovery by sending a FLOGI. port_state is identically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	 * LPFC_FLOGI while waiting for FLOGI cmpl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	if (vport->port_state != LPFC_FLOGI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		 * bb-credit recovery is in place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 						  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			if (!sparam_mb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 				goto sparam_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			rc = lpfc_read_sparam(phba, sparam_mb, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 				mempool_free(sparam_mb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				goto sparam_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			sparam_mb->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 			if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 				sparam_mp = (struct lpfc_dmabuf *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 						sparam_mb->ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 				lpfc_mbuf_free(phba, sparam_mp->virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 					       sparam_mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 				kfree(sparam_mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 				sparam_mb->ctx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 				mempool_free(sparam_mb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 				goto sparam_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
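			/* Defer the initial FLOGI; it is expected to be issued
			 * from the READ_SPARAM completion (see
			 * lpfc_mbx_cmpl_read_sparam) once the updated service
			 * parameters are in hand.
			 */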
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			phba->hba_flag |= HBA_DEFER_FLOGI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 			lpfc_initial_flogi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		if (vport->fc_flag & FC_PT2PT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			lpfc_disc_start(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			 "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			 status, vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) sparam_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	lpfc_linkdown(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 			 "0200 CONFIG_LINK bad hba state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			 vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248)  * lpfc_sli4_clear_fcf_rr_bmask - Reset FCF round robin bmask and priority list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)  * @phba: pointer to the struct lpfc_hba for this port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250)  * This function resets the round robin bit mask and clears the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)  * fcf priority list. The list deletions are done while holding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)  * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253)  * from the lpfc_fcf_pri record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	struct lpfc_fcf_pri *fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	struct lpfc_fcf_pri *next_fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 				&phba->fcf.fcf_pri_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		list_del_init(&fcf_pri->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		fcf_pri->fcf_rec.flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	struct lpfc_vport *vport = mboxq->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	if (mboxq->u.mb.mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 				 "2017 REG_FCFI mbxStatus error x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 				 "HBA state x%x\n", mboxq->u.mb.mbxStatus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 				 vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		goto fail_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	/* Start FCoE discovery by sending a FLOGI. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	/* Set the FCFI registered flag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	phba->fcf.fcf_flag |= FCF_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	/* If there is a pending FCoE event, restart FCF table scan. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		goto fail_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	/* Mark successful completion of FCF table scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	phba->hba_flag &= ~FCF_TS_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (vport->port_state != LPFC_FLOGI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		phba->hba_flag |= FCF_RR_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		lpfc_issue_init_vfi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) fail_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	phba->hba_flag &= ~FCF_RR_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  * lpfc_fab_name_match - Check if the fcf fabric name matches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)  * @fab_name: pointer to fabric name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318)  * @new_fcf_record: pointer to fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  * This routine compares the fcf record's fabric name with the provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * fabric name. If the fabric names are identical this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  * returns 1, else it returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) static uint32_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  * lpfc_sw_name_match - Check if the fcf switch name matches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)  * @sw_name: pointer to switch name.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)  * @new_fcf_record: pointer to fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)  * This routine compares the fcf record's switch name with the provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)  * switch name. If the switch names are identical this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353)  * returns 1, else it returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static uint32_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)  * lpfc_mac_addr_match - Check if the fcf mac address matches.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  * @mac_addr: pointer to mac address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  * @new_fcf_record: pointer to fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * This routine compares the fcf record's mac address with the HBA's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  * FCF mac address. If the mac addresses are identical this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  * returns 1, else it returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) static uint32_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	return (curr_vlan_id == new_vlan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)  * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)  * @fcf_index: Index for the lpfc_fcf_record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)  * @new_fcf_record: pointer to hba fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  * This routine updates the driver FCF priority record from the new HBA FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)  * record. The hbalock is asserted held in the code path calling this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)  * routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) __lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 				 struct fcf_record *new_fcf_record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 				 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	struct lpfc_fcf_pri *fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	fcf_pri->fcf_rec.fcf_index = fcf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	/* FCF record priority */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)  * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  * @fcf_rec: pointer to driver fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437)  * @new_fcf_record: pointer to fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439)  * This routine copies the FCF information from the FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)  * record to the lpfc_hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		     struct fcf_record *new_fcf_record)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	/* Fabric name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	fcf_rec->fabric_name[0] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	fcf_rec->fabric_name[1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	fcf_rec->fabric_name[2] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	fcf_rec->fabric_name[3] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	fcf_rec->fabric_name[4] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	fcf_rec->fabric_name[5] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	fcf_rec->fabric_name[6] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	fcf_rec->fabric_name[7] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	/* Mac address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	/* FCF record index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	/* FCF record priority */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	fcf_rec->priority = new_fcf_record->fip_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	/* Switch name */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	fcf_rec->switch_name[0] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	fcf_rec->switch_name[1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	fcf_rec->switch_name[2] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	fcf_rec->switch_name[3] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	fcf_rec->switch_name[4] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	fcf_rec->switch_name[5] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	fcf_rec->switch_name[6] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	fcf_rec->switch_name[7] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) }
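
/*
 * Note: the copy above relies on the driver's bf_get() accessors to pull
 * individual fields out of the words of struct fcf_record.  As a rough,
 * hypothetical sketch (the field name, mask, shift and word below are
 * illustrative only; the real descriptors live in the driver's hardware
 * headers), such an accessor reduces to a shift-and-mask on the word that
 * contains the field:
 */
#if 0	/* illustrative sketch only, not part of the driver */
#define example_field_SHIFT	0
#define example_field_MASK	0x000000ff
#define example_field_WORD	word0

#define example_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

/* example_bf_get(example_field, rec) would yield bits 7:0 of rec->word0 */
#endif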
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)  * __lpfc_update_fcf_record - Update driver fcf record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)  * @fcf_rec: pointer to driver fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)  * @new_fcf_record: pointer to hba fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)  * @addr_mode: address mode to be set to the driver fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)  * @vlan_id: vlan tag to be set to the driver fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)  * @flag: flag bits to be set to the driver fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)  * This routine updates the driver FCF record from the new HBA FCF record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)  * together with the address mode, vlan_id, and other information. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  * routine is called with the hbalock held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) __lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		       struct fcf_record *new_fcf_record, uint32_t addr_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		       uint16_t vlan_id, uint32_t flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	lockdep_assert_held(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	/* Copy the fields from the HBA's FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	/* Update other fields of driver FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	fcf_rec->addr_mode = addr_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	fcf_rec->vlan_id = vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	fcf_rec->flag |= (flag | RECORD_VALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	__lpfc_update_fcf_record_pri(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 				 new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) }
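
/*
 * __lpfc_update_fcf_record() must be entered with the hbalock held, which
 * lockdep_assert_held() enforces on lockdep-enabled builds.  A hypothetical
 * caller (sketch only; the flag and the value arguments are placeholders)
 * would follow the usual lock/update/unlock pattern:
 */
#if 0	/* illustrative sketch only, not part of the driver */
static void example_update_current_fcf(struct lpfc_hba *phba,
				       struct fcf_record *new_fcf_record,
				       uint32_t addr_mode, uint16_t vlan_id)
{
	spin_lock_irq(&phba->hbalock);
	__lpfc_update_fcf_record(phba, &phba->fcf.current_rec, new_fcf_record,
				 addr_mode, vlan_id, FCF_AVAILABLE);
	spin_unlock_irq(&phba->hbalock);
}
#endif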
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)  * lpfc_register_fcf - Register the FCF with hba.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)  * This routine issues a register fcfi mailbox command to register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529)  * the fcf with HBA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) lpfc_register_fcf(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	LPFC_MBOXQ_t *fcf_mbxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	/* If the FCF is not available do nothing. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	/* The FCF is already registered, start discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		phba->hba_flag &= ~FCF_TS_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		if (phba->pport->port_state != LPFC_FLOGI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		    phba->pport->fc_flag & FC_FABRIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 			phba->hba_flag |= FCF_RR_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			lpfc_initial_flogi(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	if (!fcf_mbxq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	lpfc_reg_fcfi(phba, fcf_mbxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	fcf_mbxq->vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) }
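
/*
 * lpfc_register_fcf() follows the driver's usual non-blocking mailbox
 * pattern: the mailbox is freed here only when submission fails
 * (MBX_NOT_FINISHED); on success, ownership passes to the completion
 * handler.  A condensed sketch of that ownership rule, with the command
 * setup step left as a placeholder comment:
 */
#if 0	/* illustrative sketch only, not part of the driver */
static void example_issue_nowait_mbox(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);

	if (!mbox)
		return;		/* roll back any in-progress flags here */
	/* ... fill in the mailbox command here ... */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
		mempool_free(mbox, phba->mbox_mem_pool);
	/* on success the completion handler releases the mailbox */
}
#endif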
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)  * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  * @new_fcf_record: pointer to fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)  * @boot_flag: Indicates if this record used by boot bios.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)  * @addr_mode: The address mode to be used by this FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)  * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)  * This routine compares the FCF record with the connect list obtained from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592)  * the config region to decide if this FCF can be used for SAN discovery. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)  * returns 1 if this record can be used for SAN discovery, otherwise zero.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594)  * If this FCF record can be used for SAN discovery, boot_flag will indicate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)  * if this FCF is used by the boot BIOS and addr_mode will indicate the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)  * addressing mode to be used for this FCF when the function returns.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597)  * If the FCF record needs to be used with a particular vlan id, the vlan is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)  * set in vlan_id on return from the function. If no VLAN tagging needs to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)  * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 			struct fcf_record *new_fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			uint32_t *boot_flag, uint32_t *addr_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 			uint16_t *vlan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	struct lpfc_fcf_conn_entry *conn_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	int i, j, fcf_vlan_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	/* Find the lowest VLAN id in the FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	for (i = 0; i < 512; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		if (new_fcf_record->vlan_bitmap[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 			fcf_vlan_id = i * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 			j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 				j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 				fcf_vlan_id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	/* FCF not valid/available or solicitation in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	    bf_get(lpfc_fcf_record_fcf_sol, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		*boot_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 				new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		if (phba->valid_vlan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 			*vlan_id = phba->vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 			*vlan_id = LPFC_FCOE_NULL_VID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	 * If there are no FCF connection table entries, the driver connects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	 * to all FCFs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	if (list_empty(&phba->fcf_conn_rec_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		*boot_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		 * When there are no FCF connect entries, use driver's default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 		 * addressing mode - FPMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		if (*addr_mode & LPFC_FCF_FPMA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 			*addr_mode = LPFC_FCF_FPMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		/* If the FCF record reports a vlan id, use that vlan id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		if (fcf_vlan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 			*vlan_id = fcf_vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 			*vlan_id = LPFC_FCOE_NULL_VID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	list_for_each_entry(conn_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 			    &phba->fcf_conn_rec_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 					     new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 					    new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			 * If the vlan bit map does not have the bit set for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 			 * vlan id to be used, then it is not a match.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			if (!(new_fcf_record->vlan_bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 				[conn_entry->conn_rec.vlan_tag / 8] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		 * If connection record does not support any addressing mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		 * skip the FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			& (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		 * Check if the connection record specifies a required
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		 * addressing mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 			 * If SPMA is required but the FCF does not support it, continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 				!(bf_get(lpfc_fcf_record_mac_addr_prov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 					new_fcf_record) & LPFC_FCF_SPMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			 * If FPMA is required but the FCF does not support it, continue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 				!(bf_get(lpfc_fcf_record_mac_addr_prov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 				new_fcf_record) & LPFC_FCF_FPMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		 * This FCF record matches the filtering criteria.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 			*boot_flag = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 			*boot_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 		 * If the user did not specify any addressing mode, or if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 		 * user's preferred addressing mode is not supported by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 		 * FCF, allow the fabric to pick the addressing mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 				new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		 * If the user specified a required address mode, assign that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		 * address mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			(!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 			*addr_mode = (conn_entry->conn_rec.flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 				FCFCNCT_AM_SPMA) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 				LPFC_FCF_SPMA : LPFC_FCF_FPMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		 * If the user specified a preferred address mode, use that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		 * address mode only if the FCF supports it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			(*addr_mode & LPFC_FCF_SPMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 				*addr_mode = LPFC_FCF_SPMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			(*addr_mode & LPFC_FCF_FPMA))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 				*addr_mode = LPFC_FCF_FPMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		/* If matching connect list has a vlan id, use it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 			*vlan_id = conn_entry->conn_rec.vlan_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		 * If no vlan id is specified in the connect list, use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		 * vlan id in the FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		else if (fcf_vlan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 			*vlan_id = fcf_vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			*vlan_id = LPFC_FCOE_NULL_VID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
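
/*
 * The VLAN scan at the top of lpfc_match_fcf_conn_list() walks the FCF
 * record's 512-byte vlan_bitmap (8 VLAN ids per byte, least-significant
 * bit first) and stops at the first set bit, i.e. the lowest VLAN id the
 * FCF advertises.  A stand-alone sketch of the same scan (hypothetical
 * helper name, assuming the same bitmap layout):
 */
#if 0	/* illustrative sketch only, not part of the driver */
static int example_lowest_vlan_id(const uint8_t *vlan_bitmap, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		if (!vlan_bitmap[i])
			continue;
		for (bit = 0; bit < 8; bit++)
			if (vlan_bitmap[i] & (1 << bit))
				return i * 8 + bit;
	}
	return -1;	/* no VLAN id advertised */
}
#endif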
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)  * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)  * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782)  * This function checks if there is any FCoE event pending while the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)  * scans FCF entries. If there is any pending event, it restarts the FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784)  * scanning and returns 1, otherwise it returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	 * If the link is up and no FCoE events occurred while in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	 * FCF discovery, there is no need to restart FCF discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	if ((phba->link_state  >= LPFC_LINK_UP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 			"2768 Pending link or FCF event during current "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			"handling of the previous event: link_state:x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 			phba->fcoe_eventtag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	if (phba->link_state >= LPFC_LINK_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 				"2780 Restart FCF table scan due to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 				"pending FCF event:evt_tag_at_scan:x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 				"evt_tag_current:x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 				phba->fcoe_eventtag_at_fcf_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 				phba->fcoe_eventtag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 		 * Do not continue FCF discovery and clear FCF_TS_INPROG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		 * flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 				"2833 Stop FCF discovery process due to link "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 				"state change (x%x)\n", phba->link_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	/* Unregister the currently registered FCF if required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	if (unreg_fcf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 		spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		lpfc_sli4_unregister_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
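
/*
 * The "pending event" test above is a snapshot-and-compare:
 * fcoe_eventtag_at_fcf_scan records the value of fcoe_eventtag when the
 * scan starts, and every asynchronous FCoE event bumps fcoe_eventtag, so
 * a mismatch means an event raced with the scan.  Condensed sketch
 * (hypothetical helper, using the fields as named in this file):
 */
#if 0	/* illustrative sketch only, not part of the driver */
static bool example_fcoe_event_raced(struct lpfc_hba *phba)
{
	return phba->fcoe_eventtag != phba->fcoe_eventtag_at_fcf_scan;
}
#endif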
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)  * @fcf_cnt: number of eligible fcf records seen so far.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)  * This function makes a running random selection decision on which FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)  * record to use through a sequence of @fcf_cnt eligible FCF records with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847)  * equal probability. To perform integer manipulation of random numbers with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)  * size uint32_t, the lower 16 bits of the 32-bit random number returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)  * from prandom_u32() are taken as the generated random number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851)  * Returns true when the outcome is that the newly read FCF record should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)  * chosen; otherwise, returns false when the outcome is to keep the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853)  * previously chosen FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	uint32_t rand_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	/* Get 16-bit uniform random number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 	rand_num = 0xFFFF & prandom_u32();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	/* Decision with probability 1/fcf_cnt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 	if ((fcf_cnt * rand_num) < 0xFFFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
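
/*
 * This is reservoir sampling with a reservoir of one: the n-th eligible
 * record replaces the current choice with probability 1/n, which leaves
 * each record seen so far equally likely to be the final pick.  With
 * rand_num uniform over [0, 0xFFFF], the test (fcf_cnt * rand_num < 0xFFFF)
 * holds for roughly 0xFFFF / fcf_cnt of the 0x10000 possible values, i.e.
 * with probability about 1/fcf_cnt; for fcf_cnt = 4 it passes exactly when
 * rand_num <= 16383, which is 16384/65536 = 1/4.  A stand-alone sketch of
 * the selection loop (hypothetical helper; assumes n >= 1 candidates):
 */
#if 0	/* illustrative sketch only, not part of the driver */
static uint16_t example_pick_fcf(struct lpfc_hba *phba,
				 const uint16_t *eligible, uint32_t n)
{
	uint16_t chosen = eligible[0];
	uint32_t i;

	for (i = 0; i < n; i++)
		/* the (i + 1)-th candidate wins with probability ~1/(i + 1) */
		if (lpfc_sli4_new_fcf_random_select(phba, i + 1))
			chosen = eligible[i];
	return chosen;
}
#endif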
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)  * @mboxq: pointer to mailbox object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874)  * @next_fcf_index: pointer to holder of next fcf index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876)  * This routine parses the non-embedded fcf mailbox command by performing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877)  * necessary error checking, non-embedded read FCF record mailbox command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)  * SGE parsing, and endianness swapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)  * Returns the pointer to the new FCF record in the non-embedded mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881)  * command DMA memory if successful, otherwise NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) static struct fcf_record *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 			     uint16_t *next_fcf_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	void *virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	struct lpfc_mbx_sge sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	struct lpfc_mbx_read_fcf_tbl *read_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	uint32_t shdr_status, shdr_add_status, if_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	union lpfc_sli4_cfg_shdr *shdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	struct fcf_record *new_fcf_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	/* Get the first SGE entry from the non-embedded DMA memory. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	 * routine only uses a single SGE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	if (unlikely(!mboxq->sge_array)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 				"2524 Failed to get the non-embedded SGE "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 				"virtual address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	virt_addr = mboxq->sge_array->addr[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	lpfc_sli_pcimem_bcopy(shdr, shdr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			      sizeof(union lpfc_sli4_cfg_shdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	if (shdr_status || shdr_add_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		if (shdr_status == STATUS_FCF_TABLE_EMPTY ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 					if_type == LPFC_SLI_INTF_IF_TYPE_2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 			lpfc_printf_log(phba, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 					LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 					"2726 READ_FCF_RECORD Indicates empty "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 					"FCF table.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 					"2521 READ_FCF_RECORD mailbox failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 					"with status x%x add_status x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 					"mbx\n", shdr_status, shdr_add_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	/* Interpreting the returned information of the FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 			      sizeof(struct lpfc_mbx_read_fcf_tbl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	new_fcf_record = (struct fcf_record *)(virt_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 			  sizeof(struct lpfc_mbx_read_fcf_tbl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 				offsetof(struct fcf_record, vlan_bitmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	return new_fcf_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) }
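
/*
 * The parse routine above converts the SLI4 response in place: the config
 * header, the READ_FCF_RECORD header and the FCF record up to vlan_bitmap
 * are converted word-by-word (lpfc_sli_pcimem_bcopy() with the same source
 * and destination), and the two trailing words are converted individually
 * with le32_to_cpu().  Minimal sketch of the per-word conversion idea
 * (generic hypothetical helper, not the driver's routine):
 */
#if 0	/* illustrative sketch only, not part of the driver */
static void example_le32_buf_to_cpu(void *buf, size_t nwords)
{
	__le32 *src = buf;
	uint32_t *dst = buf;
	size_t i;

	for (i = 0; i < nwords; i++)
		dst[i] = le32_to_cpu(src[i]);
}
#endif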
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943)  * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945)  * @fcf_record: pointer to the fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)  * @vlan_id: the lowest vlan identifier associated to this fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)  * @next_fcf_index: the index to the next fcf record in hba's fcf table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)  * This routine logs the detailed FCF record if LOG_FIP logging is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)  * enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 			      struct fcf_record *fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 			      uint16_t vlan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 			      uint16_t next_fcf_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 			"2764 READ_FCF_RECORD:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			"\tFCF_Index     : x%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 			"\tFCF_Avail     : x%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 			"\tFCF_Valid     : x%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 			"\tFCF_SOL       : x%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 			"\tFIP_Priority  : x%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 			"\tMAC_Provider  : x%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 			"\tLowest VLANID : x%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 			"\tNext_FCF_Index: x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			bf_get(lpfc_fcf_record_fcf_sol, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 			fcf_record->fip_priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 			vlan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 			bf_get(lpfc_fcf_record_mac_0, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 			bf_get(lpfc_fcf_record_mac_1, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 			bf_get(lpfc_fcf_record_mac_2, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 			bf_get(lpfc_fcf_record_mac_3, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 			bf_get(lpfc_fcf_record_mac_4, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 			bf_get(lpfc_fcf_record_mac_5, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 			next_fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)  * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006)  * @fcf_rec: pointer to an existing FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)  * @new_fcf_record: pointer to a new FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)  * @new_vlan_id: vlan id from the new FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)  * This function performs matching test of a new FCF record against an existing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)  * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012)  * will not be used as part of the FCF record matching criteria.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)  * Returns true if all the fields match, otherwise returns false.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) static bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			   struct lpfc_fcf_rec *fcf_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 			   struct fcf_record *new_fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 			   uint16_t new_vlan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	if (fcf_rec->priority != new_fcf_record->fip_priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
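
/*
 * Callers can pass LPFC_FCOE_IGNORE_VID as new_vlan_id to leave the VLAN id
 * out of the comparison, e.g. when only the fabric/switch/MAC identity
 * matters.  Sketch of that intended use against the in-use record
 * (hypothetical helper):
 */
#if 0	/* illustrative sketch only, not part of the driver */
static bool example_same_fcf_ignoring_vlan(struct lpfc_hba *phba,
					   struct fcf_record *new_fcf_record)
{
	return lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
					  new_fcf_record,
					  LPFC_FCOE_IGNORE_VID);
}
#endif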
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  * @vport: Pointer to vport object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  * @fcf_index: index to next fcf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041)  * This function processes the roundrobin FCF failover to the next FCF index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)  * When this function is invoked, there will be a current FCF registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)  * for FLOGI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)  * Return: 0 to continue retrying FLOGI on the currently registered FCF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045)  *         1 to stop FLOGI on the currently registered FCF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 			spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 					"2872 Devloss tmo with no eligible "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 					"FCF, unregister in-use FCF (x%x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 					"and rescan FCF table\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 					phba->fcf.current_rec.fcf_indx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 			lpfc_unregister_fcf_rescan(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 			goto stop_flogi_current_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		/* Mark the end to FLOGI roundrobin failover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		phba->hba_flag &= ~FCF_RR_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		/* Allow action to new fcf asynchronous event */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 				"2865 No FCF available, stop roundrobin FCF "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 				"failover and change port state:x%x/x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		if (!phba->fcf.fcf_redisc_attempted) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 			lpfc_unregister_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 			rc = lpfc_sli4_redisc_fcf_table(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 			if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 						"3195 Rediscover FCF table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 				phba->fcf.fcf_redisc_attempted = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 				lpfc_sli4_clear_fcf_rr_bmask(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 						"3196 Rediscover FCF table "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 						"failed. Status:x%x\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 					"3197 Already rediscover FCF table "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 					"attempted. No more retry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		goto stop_flogi_current_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 				"2794 Try FLOGI roundrobin FCF failover to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 				"(x%x)\n", fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 					"2761 FLOGI roundrobin FCF failover "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 					"failed (rc:x%x) to read FCF (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 					rc, phba->fcf.current_rec.fcf_indx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 			goto stop_flogi_current_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) stop_flogi_current_fcf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	lpfc_can_disctmo(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) }
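
/*
 * The return value feeds the caller's FLOGI retry decision: 0 means keep
 * retrying FLOGI on the currently registered FCF while the next roundrobin
 * candidate is read, 1 means stop FLOGI on the current FCF.  Sketch of a
 * caller (hypothetical surrounding code; lpfc_sli4_fcf_rr_next_index_get()
 * is assumed here as the source of the next roundrobin index):
 */
#if 0	/* illustrative sketch only, not part of the driver */
	fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
	if (lpfc_sli4_fcf_rr_next_proc(vport, fcf_index))
		return;		/* stop FLOGI on the current FCF */
	/* otherwise keep retrying FLOGI on the registered FCF */
#endif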
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)  * lpfc_sli4_fcf_pri_list_del - Delete a fcf record from the fcf priority list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)  * @fcf_index: the index of the fcf record to delete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)  * This routine checks the on-list flag of the fcf_index to be deleted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)  * If it is on the list then it is removed from the list, and the flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)  * is cleared. This routine grabs the hbalock before removing the fcf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)  * record from the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 			uint16_t fcf_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	struct lpfc_fcf_pri *new_fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		"3058 deleting idx x%x pri x%x flg x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		fcf_index, new_fcf_pri->fcf_rec.priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		 new_fcf_pri->fcf_rec.flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		if (phba->fcf.current_rec.priority ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 				new_fcf_pri->fcf_rec.priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			phba->fcf.eligible_fcf_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		list_del_init(&new_fcf_pri->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)  * lpfc_sli4_set_fcf_flogi_fail - Set the FLOGI failed flag for a fcf record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)  * @fcf_index: the index of the fcf record to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149)  * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150)  * flag so that the round robin selection for the particular priority level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)  * will try a different fcf record that does not have this bit set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)  * If the fcf record is re-read for any reason, this flag is cleared before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153)  * adding it to the priority list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	struct lpfc_fcf_pri *new_fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)  * lpfc_sli4_fcf_pri_list_add - Add a fcf record to the fcf priority list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168)  * @fcf_index: the index of the fcf record to add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)  * @new_fcf_record: pointer to a new FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)  * This routine checks the priority of the fcf_index to be added.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)  * If it is a lower priority than the current head of the fcf_pri list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172)  * then it is added to the list in the right order.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)  * If it is the same priority as the current head of the list then it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174)  * is added to the head of the list and its bit in the rr_bmask is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)  * If the fcf_index to be added is of a higher priority than the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176)  * head of the list then the rr_bmask is cleared, its bit is set in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177)  * rr_bmask and it is added to the head of the list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178)  * returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)  * 0=success 1=failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 	uint16_t fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 	struct fcf_record *new_fcf_record)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	uint16_t current_fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	uint16_t last_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	struct lpfc_fcf_pri *fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	struct lpfc_fcf_pri *next_fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	struct lpfc_fcf_pri *new_fcf_pri;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
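	/* Look up the priority-tracking entry that corresponds to this
	 * FCF table index.
	 */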
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		"3059 adding idx x%x pri x%x flg x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		fcf_index, new_fcf_record->fip_priority,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		new_fcf_pri->fcf_rec.flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		list_del_init(&new_fcf_pri->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	new_fcf_pri->fcf_rec.fcf_index = fcf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	if (list_empty(&phba->fcf.fcf_pri_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		ret = lpfc_sli4_fcf_rr_index_set(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 				new_fcf_pri->fcf_rec.fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 
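	/* The roundrobin bmask only carries FCF indexes at the priority level
	 * currently eligible for failover, so any set bit can be used to look
	 * up that level's priority value for comparison with the new record.
	 */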
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 				LPFC_SLI4_FCF_TBL_INDX_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 		ret = 0; /* Empty rr list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	if (new_fcf_pri->fcf_rec.priority <=  current_fcf_pri) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		if (new_fcf_pri->fcf_rec.priority <  current_fcf_pri) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 			memset(phba->fcf.fcf_rr_bmask, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 				sizeof(*phba->fcf.fcf_rr_bmask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 			/* fcfs_at_this_priority_level = 1; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 			phba->fcf.eligible_fcf_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 			/* fcfs_at_this_priority_level++; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 			phba->fcf.eligible_fcf_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		ret = lpfc_sli4_fcf_rr_index_set(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 				new_fcf_pri->fcf_rec.fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 
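	/* Otherwise walk the list, which is kept sorted by ascending
	 * priority value, and insert the new record at the first position
	 * where it fits.
	 */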
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 				&phba->fcf.fcf_pri_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 		if (new_fcf_pri->fcf_rec.priority <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 				fcf_pri->fcf_rec.priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 			if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 				list_add(&new_fcf_pri->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 						&phba->fcf.fcf_pri_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 				list_add(&new_fcf_pri->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 					 &((struct lpfc_fcf_pri *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 					fcf_pri->list.prev)->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		} else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			|| new_fcf_pri->fcf_rec.priority <
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 				next_fcf_pri->fcf_rec.priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 			list_add(&new_fcf_pri->list, &fcf_pri->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 			ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 	ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 	/* we use = instead of |= to clear the FLOGI_FAILED flag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 	new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264)  * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)  * @mboxq: pointer to mailbox object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)  * This function iterates through all the fcf records available in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269)  * HBA and chooses the optimal FCF record for discovery. After finding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)  * the FCF for discovery it registers the FCF record and kick-starts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271)  * discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272)  * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)  * use an FCF record which matches fabric name and mac address of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)  * currently used FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275)  * If the driver supports only one FCF, it will try to use the FCF record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)  * used by BOOT_BIOS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 	struct fcf_record *new_fcf_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	uint32_t boot_flag, addr_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 	uint16_t fcf_index, next_fcf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	struct lpfc_fcf_rec *fcf_rec = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	bool select_new_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	/* If there is pending FCoE event restart FCF table scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	/* Parse the FCF record from the non-embedded mailbox command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 						      &next_fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	if (!new_fcf_record) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 				"2765 Mailbox command READ_FCF_RECORD "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 				"failed to retrieve a FCF record.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 		/* Let next new FCF event trigger fast failover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 		phba->hba_flag &= ~FCF_TS_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	/* Check the FCF record against the connection list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 				      &addr_mode, &vlan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	/* Log the FCF record information if turned on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 				      next_fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	 * If the fcf record does not match with connect list entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	 * read the next entry; otherwise, this is an eligible FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	 * record for roundrobin FCF failover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 		lpfc_sli4_fcf_pri_list_del(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 					bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 					       new_fcf_record));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 				"2781 FCF (x%x) failed connection "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 				"list check: (x%x/x%x/%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 				bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 				       new_fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 				bf_get(lpfc_fcf_record_fcf_avail,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 				       new_fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 				bf_get(lpfc_fcf_record_fcf_valid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 				       new_fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 				bf_get(lpfc_fcf_record_fcf_sol,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 				       new_fcf_record));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 			    phba->fcf.current_rec.fcf_indx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 				lpfc_printf_log(phba, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 						LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 					"2862 FCF (x%x) matches property "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 					"of in-use FCF (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 					bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 					       new_fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 					phba->fcf.current_rec.fcf_indx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 				goto read_next_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 			 * In case the current in-use FCF record becomes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 			 * invalid/unavailable during FCF discovery that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 			 * was not triggered by the fast FCF failover process,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 			 * treat it as fast FCF failover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 			if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 						"2835 Invalid in-use FCF "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 						"(x%x), enter FCF failover "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 						"table scan.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 						phba->fcf.current_rec.fcf_indx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 				spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 				spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 				lpfc_sli4_mbox_cmd_free(phba, mboxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 						LPFC_FCOE_FCF_GET_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		goto read_next_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 							new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 			goto read_next_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 	 * If this is not the first FCF discovery of the HBA, use the last
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 	 * FCF record for the discovery. The conditions for a rescan to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	 * match the in-use FCF record are: fabric name, switch name, mac
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	 * address, and vlan_id.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	if (phba->fcf.fcf_flag & FCF_IN_USE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 			lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		    new_fcf_record, vlan_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 			    phba->fcf.current_rec.fcf_indx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 				phba->fcf.fcf_flag |= FCF_AVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 					/* Stop FCF redisc wait timer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 					__lpfc_sli4_stop_fcf_redisc_wait_timer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 									phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 					/* Fast failover, mark completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 				spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 						"2836 New FCF matches in-use "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 						"FCF (x%x), port_state:x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 						"fc_flag:x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 						phba->fcf.current_rec.fcf_indx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 						phba->pport->port_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 						phba->pport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 					"2863 New FCF (x%x) matches "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 					"property of in-use FCF (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 					bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 					       new_fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 					phba->fcf.current_rec.fcf_indx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 		 * Read next FCF record from HBA searching for the matching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		 * with in-use record only if not during the fast failover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 		 * period. In case of fast failover period, it shall try to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		 * determine whether the FCF record just read should be the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		 * next candidate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 			spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 			goto read_next_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	 * Update on failover FCF record only if it's in FCF fast-failover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	 * period; otherwise, update on current FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		fcf_rec = &phba->fcf.failover_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		fcf_rec = &phba->fcf.current_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
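	/* A candidate FCF has already been recorded: decide whether the newly
	 * read record should replace it, preferring a record with the boot
	 * flag set, then a lower (better) priority value, then a random
	 * tie-break among records of equal priority.
	 */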
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		 * If the driver FCF record does not have boot flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		 * set and new hba fcf record has boot flag set, use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		 * the new hba fcf record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 		if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 			/* Choose this FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 					"2837 Update current FCF record "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 					"(x%x) with new FCF record (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 					fcf_rec->fcf_indx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 					bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 					new_fcf_record));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 					addr_mode, vlan_id, BOOT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 			spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 			goto read_next_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		 * If the driver FCF record has boot flag set and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		 * new hba FCF record does not have boot flag, read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		 * the next FCF record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 			spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			goto read_next_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		 * If the new hba FCF record has lower priority value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		 * than the driver FCF record, use the new record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 		if (new_fcf_record->fip_priority < fcf_rec->priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 			/* Choose the new FCF record with lower priority */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 					"2838 Update current FCF record "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 					"(x%x) with new FCF record (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 					fcf_rec->fcf_indx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 					bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 					       new_fcf_record));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 					addr_mode, vlan_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 			/* Reset running random FCF selection count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 			phba->fcf.eligible_fcf_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		} else if (new_fcf_record->fip_priority == fcf_rec->priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 			/* Update running random FCF selection count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 			phba->fcf.eligible_fcf_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 			select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 						phba->fcf.eligible_fcf_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 			if (select_new_fcf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 					"2839 Update current FCF record "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 					"(x%x) with new FCF record (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 					fcf_rec->fcf_indx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 					bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 					       new_fcf_record));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 				/* Choose the new FCF by random selection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 				__lpfc_update_fcf_record(phba, fcf_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 							 new_fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 							 addr_mode, vlan_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 		goto read_next_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 	 * This is the first suitable FCF record, choose this record for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	 * initial best-fit FCF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 	if (fcf_rec) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 				"2840 Update initial FCF candidate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 				"with FCF (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 				bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 				       new_fcf_record));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 					 addr_mode, vlan_id, (boot_flag ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 					 BOOT_ENABLE : 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		phba->fcf.fcf_flag |= FCF_AVAILABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		/* Setup initial running random FCF selection count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 		phba->fcf.eligible_fcf_cnt = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	goto read_next_fcf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 
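/* Free the mailbox and decide whether to keep scanning, complete a fast
 * failover, or register the FCF record that has been selected.
 */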
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) read_next_fcf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 			 * Case of FCF fast failover scan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 			 * No suitable FCF record has been found, so cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 			 * the in-progress FCF scan and do nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 					       "2782 No suitable FCF found: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 					       "(x%x/x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 					       phba->fcoe_eventtag_at_fcf_scan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 					       bf_get(lpfc_fcf_record_fcf_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 						      new_fcf_record));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 				spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 					phba->hba_flag &= ~FCF_TS_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 					spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 					/* Unregister in-use FCF and rescan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 					lpfc_printf_log(phba, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 							LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 							"2864 On devloss tmo "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 							"unreg in-use FCF and "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 							"rescan FCF table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 					lpfc_unregister_fcf_rescan(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 				 * Let next new FCF event trigger fast failover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 				phba->hba_flag &= ~FCF_TS_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 				spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 			 * It has found a suitable FCF record that is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			 * the same as the in-use FCF record, unregister the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 			 * in-use FCF record, replace the in-use FCF record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 			 * with the new FCF record, mark FCF fast failover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 			 * completed, and then start registering the new FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 			 * record.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 			/* Unregister the current in-use FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 			lpfc_unregister_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 			/* Replace in-use record with the new record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 					"2842 Replace in-use FCF (x%x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 					"with failover FCF (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 					phba->fcf.current_rec.fcf_indx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 					phba->fcf.failover_rec.fcf_indx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 			memcpy(&phba->fcf.current_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 			       &phba->fcf.failover_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 			       sizeof(struct lpfc_fcf_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 			 * Mark the fast FCF failover rediscovery completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 			 * and the start of the first round of the roundrobin
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 			 * FCF failover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 			spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 			spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 			/* Register to the new FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 			lpfc_register_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 			 * In case of a transition period to fast FCF failover,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 			 * do nothing when the search reaches the end of the table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 			if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 			    (phba->fcf.fcf_flag & FCF_REDISC_PEND))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 			if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 				phba->fcf.fcf_flag & FCF_IN_USE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 				 * In case the current in-use FCF record is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 				 * longer reported during FCF discovery that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 				 * was not triggered by the fast FCF failover
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 				 * process, treat it as a fast FCF failover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 						"2841 In-use FCF record (x%x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 						"not reported, entering fast "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 						"FCF failover mode scanning.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 						phba->fcf.current_rec.fcf_indx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 				spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 				spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 				lpfc_sli4_fcf_scan_read_fcf_rec(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 						LPFC_FCOE_FCF_GET_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 			/* Register to the new FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 			lpfc_register_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	lpfc_register_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641)  * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)  * @mboxq: pointer to mailbox object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645)  * This is the completion handler for the read FCF record mailbox command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)  * issued for FLOGI failure roundrobin FCF failover, where the FCF index is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647)  * taken from the eligible FCF record bmask. If the FCF read back is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648)  * valid/available, it falls through to retrying FLOGI to the currently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)  * registered FCF again. Otherwise, if the FCF read back is valid and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)  * available, it will set the newly read FCF record as the failover FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)  * record, unregister the currently registered FCF record, copy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)  * failover FCF record to the current FCF record, and then register the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)  * current FCF record before proceeding to try FLOGI on the new failover FCF.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	struct fcf_record *new_fcf_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	uint32_t boot_flag, addr_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	uint16_t next_fcf_index, fcf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	uint16_t current_fcf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 	uint16_t vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	/* If link state is not up, stop the roundrobin failover process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 	if (phba->link_state < LPFC_LINK_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 		spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 		phba->hba_flag &= ~FCF_RR_INPROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	/* Parse the FCF record from the non-embedded mailbox command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 						      &next_fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	if (!new_fcf_record) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 				"2766 Mailbox command READ_FCF_RECORD "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 				"failed to retrieve a FCF record. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 				"hba_flg x%x fcf_flg x%x\n", phba->hba_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 				phba->fcf.fcf_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 		lpfc_unregister_fcf_rescan(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	/* Get the needed parameters from FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 				      &addr_mode, &vlan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	/* Log the FCF record information if turned on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 				      next_fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 				"2848 Remove ineligible FCF (x%x) from "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 				"roundrobin bmask\n", fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 		/* Clear roundrobin bmask bit for ineligible FCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		/* Perform next round of roundrobin FCF failover */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		goto error_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 
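	/* The roundrobin selection came back to the FCF already in use, so
	 * simply retry FLOGI to it after a short delay instead of
	 * re-registering it.
	 */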
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 				"2760 Perform FLOGI roundrobin FCF failover: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 				"FCF (x%x) back to FCF (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 				phba->fcf.current_rec.fcf_indx, fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		/* Wait 500 ms before retrying FLOGI to current FCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		lpfc_issue_init_vfi(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	/* Upload new FCF record to the failover FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 			phba->fcf.failover_rec.fcf_indx, fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 				 new_fcf_record, addr_mode, vlan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 				 (boot_flag ? BOOT_ENABLE : 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	current_fcf_index = phba->fcf.current_rec.fcf_indx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	/* Unregister the current in-use FCF record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	lpfc_unregister_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	/* Replace in-use record with the new record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	       sizeof(struct lpfc_fcf_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 			"2783 Perform FLOGI roundrobin FCF failover: FCF "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) error_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 	lpfc_register_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751)  * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753)  * @mboxq: pointer to mailbox object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)  * This is the callback function of the read FCF record mailbox command for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)  * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757)  * failover when a new FCF event happens. If the FCF read back is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)  * valid/available and it passes the connection list check, it updates
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)  * the bmask for the eligible FCF record for roundrobin failover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	struct fcf_record *new_fcf_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	uint32_t boot_flag, addr_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	uint16_t fcf_index, next_fcf_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 	uint16_t vlan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	/* If link state is not up, no need to proceed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	if (phba->link_state < LPFC_LINK_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 	/* If FCF discovery period is over, no need to proceed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	/* Parse the FCF record from the non-embedded mailbox command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 						      &next_fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 	if (!new_fcf_record) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 				"2767 Mailbox command READ_FCF_RECORD "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 				"failed to retrieve a FCF record.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	/* Check the connection list for eligibility */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 				      &addr_mode, &vlan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	/* Log the FCF record information if turned on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 				      next_fcf_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 	/* Update the eligible FCF record index bmask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 	rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)  * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)  * @mboxq: pointer to mailbox data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)  * This function handles completion of the init_vfi mailbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 	struct lpfc_vport *vport = mboxq->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 	 * VFI is not supported on interface type 0, so just do the flogi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	 * Also continue if the VFI is in use - just use the same one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	if (mboxq->u.mb.mbxStatus &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 			LPFC_SLI_INTF_IF_TYPE_0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 				 "2891 Init VFI mailbox failed 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 				 mboxq->u.mb.mbxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 		mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	lpfc_initial_flogi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 	mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)  * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)  * @vport: pointer to lpfc_vport data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)  * This function issues an init_vfi mailbox command to initialize the VFI and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)  * VPI for the physical port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) lpfc_issue_init_vfi(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 	LPFC_MBOXQ_t *mboxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 	struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	if (!mboxq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 			LOG_TRACE_EVENT, "2892 Failed to allocate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 			"init_vfi mailbox\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 	lpfc_init_vfi(mboxq, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 	mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 	if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 				 "2893 Failed to issue init_vfi mailbox\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 		mempool_free(mboxq, vport->phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873)  * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875)  * @mboxq: pointer to mailbox data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)  * This function handles completion of the init_vpi mailbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	struct lpfc_vport *vport = mboxq->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 	if (mboxq->u.mb.mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 				 "2609 Init VPI mailbox failed 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 				 mboxq->u.mb.mbxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 		mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 	/* If this port is the physical port or FDISC is done, do reg_vpi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 	if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 		ndlp = lpfc_findnode_did(vport, Fabric_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 		if (!ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 			lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 				LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 				"2731 Cannot find fabric "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 				"controller node\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 			lpfc_register_new_vport(phba, vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 		lpfc_initial_fdisc(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 				 "2606 No NPIV Fabric support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)  * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)  * @vport: pointer to lpfc_vport data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)  * This function issues an init_vpi mailbox command to initialize the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)  * VPI for the vport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) lpfc_issue_init_vpi(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	LPFC_MBOXQ_t *mboxq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) 	int rc, vpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) 	if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) 		vpi = lpfc_alloc_vpi(vport->phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 		if (!vpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 					 "3303 Failed to obtain vport vpi\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 		vport->vpi = vpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	if (!mboxq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 			LOG_TRACE_EVENT, "2607 Failed to allocate "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 			"init_vpi mailbox\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	mboxq->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 				 "2608 Failed to issue init_vpi mailbox\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		mempool_free(mboxq, vport->phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966)  * lpfc_start_fdiscs - send FDISCs for each vport on this port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969)  * This function loops through the list of vports on the @phba and issues an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970)  * FDISC if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) lpfc_start_fdiscs(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	struct lpfc_vport **vports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	vports = lpfc_create_vport_work_array(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	if (vports != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 			/* There is no vpi for this vport */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 			if (vports[i]->vpi > phba->max_vpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 				lpfc_vport_set_state(vports[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 						     FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 			if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 				lpfc_vport_set_state(vports[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 						     FC_VPORT_LINKDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 				lpfc_issue_init_vpi(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 				lpfc_initial_fdisc(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 			else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 				lpfc_vport_set_state(vports[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) 						     FC_VPORT_NO_FABRIC_SUPP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 				lpfc_printf_vlog(vports[i], KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) 						 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 						 "0259 No NPIV "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) 						 "Fabric support\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	lpfc_destroy_vport_work_array(phba, vports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 
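/*
 * Completion handler for the REG_VFI mailbox command. On success it
 * marks the VFI (and implicitly the VPI) as registered and, if the
 * port is still in LPFC_FABRIC_CFG_LINK state, starts discovery:
 * loop-map discovery for private loop/pt2pt, otherwise FDISCs for the
 * vports plus the SCR/NameServer PLOGI for this port.
 */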
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 	struct lpfc_dmabuf *dmabuf = mboxq->ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 	struct lpfc_vport *vport = mboxq->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	 * VFI not supported for interface type 0, so ignore any mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	 * error (except VFI in use) and continue with the discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 	if (mboxq->u.mb.mbxStatus &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 			LPFC_SLI_INTF_IF_TYPE_0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	    mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 				 "2018 REG_VFI mbxStatus error x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 				 "HBA state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 				 mboxq->u.mb.mbxStatus, vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 			/* FLOGI failed, use loop map to make discovery list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 			lpfc_disc_list_loopmap(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 			/* Start discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 			lpfc_disc_start(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 			goto out_free_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 		goto out_free_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	/* If the VFI is already registered, there is nothing else to do,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) 	 * unless this was a VFI update and we are in PT2PT mode; then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 	 * we should drop through to set the port state to ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) 	if (vport->fc_flag & FC_VFI_REGISTERED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 		if (!(phba->sli_rev == LPFC_SLI_REV4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 		      vport->fc_flag & FC_PT2PT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 			goto out_free_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 	/* The VPI is implicitly registered when the VFI is registered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	vport->vpi_state |= LPFC_VPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	vport->fc_flag |= FC_VFI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 	/* In case of an SLI4 FC loopback test, we are ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 	if ((phba->sli_rev == LPFC_SLI_REV4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 	    (phba->link_flag & LS_LOOPBACK_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		phba->link_state = LPFC_HBA_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 		goto out_free_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 			 "3313 cmpl reg vfi  port_state:%x fc_flag:%x myDid:%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 			 "alpacnt:%d LinkState:%x topology:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 			 vport->port_state, vport->fc_flag, vport->fc_myDID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 			 vport->phba->alpa_map[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 			 phba->link_state, phba->fc_topology);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 		 * For private loop or for NPort pt2pt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 		 * just start discovery and we are done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 		if ((vport->fc_flag & FC_PT2PT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 		    ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 		    !(vport->fc_flag & FC_PUBLIC_LOOP))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 			/* Use loop map to make discovery list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 			lpfc_disc_list_loopmap(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 			/* Start discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 			if (vport->fc_flag & FC_PT2PT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 				vport->port_state = LPFC_VPORT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 				lpfc_disc_start(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 			lpfc_start_fdiscs(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 			lpfc_do_scr_ns_plogi(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) out_free_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) 	mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) 	if (dmabuf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) 		lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 		kfree(dmabuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) 
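/*
 * Completion handler for the READ_SPARAM mailbox command. Copies the
 * returned service parameters into the vport, derives E_D_TOV/RA_TOV,
 * updates the port WWNs and, if an initial FLOGI was deferred until
 * the CSPs were current, issues it now.
 */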
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	MAILBOX_t *mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 	struct lpfc_vport  *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) 	struct serv_parm *sp = &vport->fc_sparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	uint32_t ed_tov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 	/* Check for error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) 	if (mb->mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) 		/* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) 				 "0319 READ_SPARAM mbxStatus error x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 				 "hba state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) 				 mb->mbxStatus, vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) 		lpfc_linkdown(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) 	memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 	       sizeof (struct serv_parm));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) 	ed_tov = be32_to_cpu(sp->cmn.e_d_tov);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 		ed_tov = (ed_tov + 999999) / 1000000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) 	phba->fc_edtov = ed_tov;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 	phba->fc_ratov = (2 * ed_tov) / 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) 	if (phba->fc_ratov < FF_DEF_RATOV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) 		/* RA_TOV should be at least 10 sec for initial flogi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 		phba->fc_ratov = FF_DEF_RATOV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) 	lpfc_update_vport_wwn(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 		memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 		memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwpn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) 	kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 	/* Check if sending the FLOGI is being deferred to after we get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) 	 * up to date CSPs from MBX_READ_SPARAM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	if (phba->hba_flag & HBA_DEFER_FLOGI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) 		lpfc_initial_flogi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 		phba->hba_flag &= ~HBA_DEFER_FLOGI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	pmb->ctx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 	kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) 	lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 
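/*
 * Handle a link-up attention from READ_TOPOLOGY: record the link speed
 * and topology, pick up the granted ALPA (loop) or the preferred DID,
 * then kick off READ_SPARAM and either CONFIG_LINK (FC) or the initial
 * FCF table scan (FCoE) to continue bring-up.
 */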
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 	struct lpfc_vport *vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) 	LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) 	struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	struct lpfc_dmabuf *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 	struct fcf_record *fcf_record;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) 	uint32_t fc_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 	spin_lock_irqsave(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) 	phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) 		switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		case LPFC_LINK_SPEED_1GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) 		case LPFC_LINK_SPEED_2GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 		case LPFC_LINK_SPEED_4GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 		case LPFC_LINK_SPEED_8GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 		case LPFC_LINK_SPEED_10GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		case LPFC_LINK_SPEED_16GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 		case LPFC_LINK_SPEED_32GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 		case LPFC_LINK_SPEED_64GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 		case LPFC_LINK_SPEED_128GHZ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 			phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	if (phba->fc_topology &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	    phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 				"3314 Topology changed was 0x%x is 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 				phba->fc_topology,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 				bf_get(lpfc_mbx_read_top_topology, la));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 		phba->fc_topology_changed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) 	phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 	phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 		phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 		/* If npiv is enabled and this adapter supports npiv, log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 		 * a message that npiv is not supported in this topology.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 		if (phba->cfg_enable_npiv && phba->max_vpi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 				"1309 Link Up Event npiv not supported in loop "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 				"topology\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 		/* Get Loop Map information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 		if (bf_get(lpfc_mbx_read_top_il, la))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 			fc_flags |= FC_LBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 		i = la->lilpBde64.tus.f.bdeSize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 			phba->alpa_map[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 		} else {
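			/* When link-event logging is enabled, dump the
			 * ALPA loop map 16 bytes per log message.
			 */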
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 			if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 				int numalpa, j, k;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 				union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 					uint8_t pamap[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 					struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 						uint32_t wd1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 						uint32_t wd2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 						uint32_t wd3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 						uint32_t wd4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 					} pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 				} un;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 				numalpa = phba->alpa_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 				j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 				while (j < numalpa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 					memset(un.pamap, 0, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 					for (k = 1; j < numalpa; k++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) 						un.pamap[k - 1] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 							phba->alpa_map[j + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) 						j++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) 						if (k == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 							break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 					/* Link Up Event ALPA map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 					lpfc_printf_log(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 							KERN_WARNING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 							LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 							"1304 Link Up Event "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 							"ALPA map Data: x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 							"x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 							un.pa.wd1, un.pa.wd2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 							un.pa.wd3, un.pa.wd4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 		if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			if (phba->max_vpi && phba->cfg_enable_npiv &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 			   (phba->sli_rev >= LPFC_SLI_REV3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 				phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 		vport->fc_myDID = phba->fc_pref_DID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		fc_flags |= FC_LBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 	spin_unlock_irqrestore(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 	if (fc_flags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		spin_lock_irqsave(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 		vport->fc_flag |= fc_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 		spin_unlock_irqrestore(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 	lpfc_linkup(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	sparam_mbox = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) 	if (!sparam_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 		mempool_free(sparam_mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 	sparam_mbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) 	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) 	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) 	if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) 		mp = (struct lpfc_dmabuf *)sparam_mbox->ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) 		kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 		mempool_free(sparam_mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) 		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 		if (!cfglink_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) 		vport->port_state = LPFC_LOCAL_CFG_LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 		lpfc_config_link(phba, cfglink_mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 		cfglink_mbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) 		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 		if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) 			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 		vport->port_state = LPFC_VPORT_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 		 * Add the driver's default FCF record at FCF index 0 now. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 		 * is the phase 1 implementation that supports FCF index 0 and driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) 		 * defaults.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) 		if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) 			fcf_record = kzalloc(sizeof(struct fcf_record),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 			if (unlikely(!fcf_record)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 				lpfc_printf_log(phba, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 					LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 					"2554 Could not allocate memory for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 					"fcf record\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) 				rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 			lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 						LPFC_FCOE_FCF_DEF_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 			rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 			if (unlikely(rc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 				lpfc_printf_log(phba, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 					LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 					"2013 Could not manually add FCF "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 					"record 0, status %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 				rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 				kfree(fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 			kfree(fcf_record);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		 * The driver is expected to do FIP/FCF. Call the port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 		 * and get the FCF Table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 		spin_lock_irqsave(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		if (phba->hba_flag & FCF_TS_INPROG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 			spin_unlock_irqrestore(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		/* This is the initial FCF discovery scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		phba->fcf.fcf_flag |= FCF_INIT_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		spin_unlock_irqrestore(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 				"2778 Start FCF table scan at linkup\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 						     LPFC_FCOE_FCF_GET_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 			spin_lock_irqsave(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) 			spin_unlock_irqrestore(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) 		/* Reset FCF roundrobin bmask for new discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 		lpfc_sli4_clear_fcf_rr_bmask(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	/* Prepare for LINK up registrations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 	memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 		  init_utsname()->nodename);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 			 "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 			 vport->port_state, sparam_mbox, cfglink_mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 
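/* Re-enable Link Attention processing; on SLI-3 and earlier also set
 * the HC_LAINT_ENA bit in the Host Control register.
 */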
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) lpfc_enable_la(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 	uint32_t control;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	struct lpfc_sli *psli = &phba->sli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 	psli->sli_flag |= LPFC_PROCESS_LA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 	if (phba->sli_rev <= LPFC_SLI_REV3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		control = readl(phba->HCregaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 		control |= HC_LAINT_ENA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 		writel(control, phba->HCregaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		readl(phba->HCregaddr); /* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 
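/* Act on a link-down attention: mark the link down, re-arm Link
 * Attention interrupts and unregister any FCF that is no longer in use.
 */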
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	lpfc_linkdown(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 	lpfc_enable_la(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	lpfc_unregister_unused_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) 	/* turn on Link Attention interrupts - no CLEAR_LA needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)  * This routine handles processing a READ_TOPOLOGY mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424)  * command upon completion. It is set up in the LPFC_MBOXQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425)  * as the completion routine when the command is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426)  * handed off to the SLI layer. SLI4 only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 	struct lpfc_vport *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 	struct lpfc_mbx_read_top *la;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) 	struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 	MAILBOX_t *mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 	uint8_t attn_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 	/* Unblock ELS traffic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 	pring = lpfc_phba_elsring(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 	if (pring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		pring->flag &= ~LPFC_STOP_IOCB_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 	/* Check for error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 	if (mb->mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 				"1307 READ_LA mbox error x%x state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 				mb->mbxStatus, vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 		lpfc_mbx_issue_link_down(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 		phba->link_state = LPFC_HBA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 		goto lpfc_mbx_cmpl_read_topology_free_mbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 	attn_type = bf_get(lpfc_mbx_read_top_att_type, la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 	memcpy(&phba->alpa_map[0], mp->virt, 128);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 	spin_lock_irqsave(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 	if (bf_get(lpfc_mbx_read_top_pb, la))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 		vport->fc_flag |= FC_BYPASSED_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 		vport->fc_flag &= ~FC_BYPASSED_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 	spin_unlock_irqrestore(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 	if (phba->fc_eventTag <= la->eventTag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 		phba->fc_stat.LinkMultiEvent++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 		if (attn_type == LPFC_ATT_LINK_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 			if (phba->fc_eventTag != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 				lpfc_linkdown(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 	phba->fc_eventTag = la->eventTag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 	if (phba->sli_rev < LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 		spin_lock_irqsave(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 		if (bf_get(lpfc_mbx_read_top_mm, la))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 			phba->sli.sli_flag |= LPFC_MENLO_MAINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 			phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 		spin_unlock_irqrestore(&phba->hbalock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 	phba->link_events++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 	if ((attn_type == LPFC_ATT_LINK_UP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 	    !(phba->sli.sli_flag & LPFC_MENLO_MAINT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 		phba->fc_stat.LinkUp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 		if (phba->link_flag & LS_LOOPBACK_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 					"1306 Link Up Event in loop back mode "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) 					"x%x received Data: x%x x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 					la->eventTag, phba->fc_eventTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 					bf_get(lpfc_mbx_read_top_alpa_granted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 					       la),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) 					bf_get(lpfc_mbx_read_top_link_spd, la),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 					phba->alpa_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) 					"1303 Link Up Event x%x received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 					"Data: x%x x%x x%x x%x x%x x%x %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 					la->eventTag, phba->fc_eventTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 					bf_get(lpfc_mbx_read_top_alpa_granted,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 					       la),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 					bf_get(lpfc_mbx_read_top_link_spd, la),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 					phba->alpa_map[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 					bf_get(lpfc_mbx_read_top_mm, la),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 					bf_get(lpfc_mbx_read_top_fa, la),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 					phba->wait_4_mlo_maint_flg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		lpfc_mbx_process_link_up(phba, la);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	} else if (attn_type == LPFC_ATT_LINK_DOWN ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 		   attn_type == LPFC_ATT_UNEXP_WWPN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 		phba->fc_stat.LinkDown++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 		if (phba->link_flag & LS_LOOPBACK_MODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 				"1308 Link Down Event in loop back mode "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 				"x%x received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 				"Data: x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 				la->eventTag, phba->fc_eventTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 				phba->pport->port_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 		else if (attn_type == LPFC_ATT_UNEXP_WWPN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 				"1313 Link Down Unexpected FA WWPN Event x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 				"received Data: x%x x%x x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 				la->eventTag, phba->fc_eventTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 				phba->pport->port_state, vport->fc_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 				bf_get(lpfc_mbx_read_top_mm, la),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 				bf_get(lpfc_mbx_read_top_fa, la));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 				"1305 Link Down Event x%x received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 				"Data: x%x x%x x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 				la->eventTag, phba->fc_eventTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 				phba->pport->port_state, vport->fc_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 				bf_get(lpfc_mbx_read_top_mm, la),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 				bf_get(lpfc_mbx_read_top_fa, la));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		lpfc_mbx_issue_link_down(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 	if (phba->sli.sli_flag & LPFC_MENLO_MAINT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	    attn_type == LPFC_ATT_LINK_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 		if (phba->link_state != LPFC_LINK_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 			phba->fc_stat.LinkDown++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 			lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 				"1312 Link Down Event x%x received "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 				"Data: x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 				la->eventTag, phba->fc_eventTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 				phba->pport->port_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 			lpfc_mbx_issue_link_down(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 			lpfc_enable_la(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) 		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 				"1310 Menlo Maint Mode Link up Event x%x rcvd "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 				"Data: x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 				la->eventTag, phba->fc_eventTag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 				phba->pport->port_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 		 * The cmnd that triggered this will be waiting for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) 		 * signal.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 		/* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 		if (phba->wait_4_mlo_maint_flg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 			phba->wait_4_mlo_maint_flg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 			wake_up_interruptible(&phba->wait_4_mlo_m_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	if ((phba->sli_rev < LPFC_SLI_REV4) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	    bf_get(lpfc_mbx_read_top_fa, la)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 		if (phba->sli.sli_flag & LPFC_MENLO_MAINT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 			lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 		lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 				"1311 fa %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 				bf_get(lpfc_mbx_read_top_fa, la));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) lpfc_mbx_cmpl_read_topology_free_mbuf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585)  * This routine handles processing a REG_LOGIN mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586)  * command upon completion. It is set up in the LPFC_MBOXQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587)  * as the completion routine when the command is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)  * handed off to the SLI layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) 	struct lpfc_vport  *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	pmb->ctx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	pmb->ctx_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 			 "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) 			 kref_read(&ndlp->kref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 			 ndlp->nlp_usg_map, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 	if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 		ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 	if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 	    ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 		/* We received an RSCN after issuing this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 		 * mbox reg login; we may have cycled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 		 * back through the state and be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 		 * back at reg login state, so this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 		 * mbox needs to be ignored because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 		 * there is another reg login in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 		 * process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 		ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) 		 * We cannot leave the RPI registered because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 		 * if we go thru discovery again for this ndlp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) 		 * a subsequent REG_RPI will fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 		ndlp->nlp_flag |= NLP_RPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 		lpfc_unreg_rpi(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 	/* Call state machine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) 	lpfc_disc_state_machine(vport, ndlp, pmb, NLP_EVT_CMPL_REG_LOGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 	kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 	/* decrement the node reference count held for this callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 	 * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 
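/*
 * Completion handler for the UNREG_VPI mailbox command. A "VPI busy"
 * status triggers an HBA reset unless the driver is unloading. In all
 * cases the VPI is marked unregistered, the vport is flagged as needing
 * a new REG_VPI, and the shost reference taken by lpfc_vport_delete()
 * is released for an unloading non-physical vport.
 */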
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) 	MAILBOX_t *mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	struct lpfc_vport *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 	switch (mb->mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) 	case 0x0011:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) 	case 0x0020:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) 				 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) 				 mb->mbxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) 	/* If VPI is busy, reset the HBA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) 	case 0x9700:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) 			"2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) 			vport->vpi, mb->mbxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) 		if (!(phba->pport->load_flag & FC_UNLOADING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) 			lpfc_workq_post_event(phba, NULL, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) 				LPFC_EVT_RESET_HBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) 	vport->vpi_state &= ~LPFC_VPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) 	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) 	vport->unreg_vpi_cmpl = VPORT_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) 	lpfc_cleanup_vports_rrqs(vport, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) 	 * This shost reference might have been taken at the beginning of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) 	 * lpfc_vport_delete()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) 	if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) 		scsi_host_put(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) 
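/*
 * Issue an UNREG_VPI mailbox command for the vport, with
 * lpfc_mbx_cmpl_unreg_vpi as the completion handler. Returns 0 on
 * success, 1 if no mailbox could be allocated, or the mailbox return
 * code if the command could not be issued.
 */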
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) 	LPFC_MBOXQ_t *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) 	if (!mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) 	lpfc_unreg_vpi(phba, vport->vpi, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) 	mbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) 	mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) 	if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) 				 "1800 Could not issue unreg_vpi\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) 		mempool_free(mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) 		vport->unreg_vpi_cmpl = VPORT_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) 
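/*
 * Completion handler for the REG_VPI mailbox command. On failure the
 * vport is set to FC_VPORT_FAILED and its fabric state is cleared; on
 * success the VPI is marked registered and ELS PLOGIs are issued to any
 * nodes on the NPR list to continue discovery.
 */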
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) 	struct lpfc_vport *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) 	MAILBOX_t *mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) 	switch (mb->mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) 	case 0x0011:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) 	case 0x9601:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) 	case 0x9602:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) 				 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) 				 mb->mbxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) 		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) 		vport->fc_myDID = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) 		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) 		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) 			if (phba->nvmet_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) 				lpfc_nvmet_update_targetport(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) 				lpfc_nvme_update_localport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) 	vport->vpi_state |= LPFC_VPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) 	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) 	vport->num_disc_nodes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) 	/* go thru NPR list and issue ELS PLOGIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) 	if (vport->fc_npr_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) 		lpfc_els_disc_plogi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) 	if (!vport->num_disc_nodes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) 		vport->fc_flag &= ~FC_NDISC_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) 		lpfc_can_disctmo(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) 	vport->port_state = LPFC_VPORT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762)  * lpfc_create_static_vport - Read HBA config region to create static vports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765)  * This routine issues a DUMP mailbox command for config region 22 to get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766)  * the list of static vports to be created. The function creates vports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767)  * based on the information returned from the HBA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) lpfc_create_static_vport(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) 	LPFC_MBOXQ_t *pmb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) 	MAILBOX_t *mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) 	struct static_vport_info *vport_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) 	int mbx_wait_rc = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) 	struct fc_vport_identifiers vport_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) 	struct fc_vport *new_fc_vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) 	struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) 	struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) 	uint16_t offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) 	uint8_t *vport_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) 	struct lpfc_dmabuf *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) 	uint32_t byte_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) 	if (!pmb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) 				"0542 lpfc_create_static_vport failed to"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) 				" allocate mailbox memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) 	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) 	mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) 	vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) 	if (!vport_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) 				"0543 lpfc_create_static_vport failed to"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) 				" allocate vport_info\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) 		mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) 	vport_buff = (uint8_t *) vport_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) 		/* free dma buffer from previous round */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) 		if (pmb->ctx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) 			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) 			kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) 		if (lpfc_dump_static_vport(phba, pmb, offset))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) 		pmb->vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) 		mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) 							LPFC_MBOX_TMO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) 		if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) 				"0544 lpfc_create_static_vport failed to"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) 				" issue dump mailbox command ret 0x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) 				"status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) 				mbx_wait_rc, mb->mbxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) 		if (phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) 			byte_count = pmb->u.mqe.un.mb_words[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) 			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) 			if (byte_count > sizeof(struct static_vport_info) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) 					offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) 				byte_count = sizeof(struct static_vport_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) 					- offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) 			memcpy(vport_buff + offset, mp->virt, byte_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) 			offset += byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) 			if (mb->un.varDmp.word_cnt >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) 				sizeof(struct static_vport_info) - offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) 				mb->un.varDmp.word_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) 					sizeof(struct static_vport_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) 						- offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) 			byte_count = mb->un.varDmp.word_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) 			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) 				vport_buff + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) 				byte_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) 			offset += byte_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) 	} while (byte_count &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) 		offset < sizeof(struct static_vport_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) 	if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) 		((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) 			!= VPORT_INFO_REV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) 				"0545 lpfc_create_static_vport bad"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) 				" information header 0x%x 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) 				le32_to_cpu(vport_info->signature),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) 				le32_to_cpu(vport_info->rev) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) 				VPORT_INFO_REV_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) 	shost = lpfc_shost_from_vport(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) 	for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) 		memset(&vport_id, 0, sizeof(vport_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) 		vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) 		vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) 		if (!vport_id.port_name || !vport_id.node_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) 		vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) 		vport_id.vport_type = FC_PORTTYPE_NPIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) 		vport_id.disable = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) 		new_fc_vport = fc_vport_create(shost, 0, &vport_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) 		if (!new_fc_vport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) 				"0546 lpfc_create_static_vport failed to"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) 				" create vport\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) 		vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) 		vport->vport_flag |= STATIC_VPORT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) 	kfree(vport_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) 	if (mbx_wait_rc != MBX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) 		if (pmb->ctx_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) 			mp = (struct lpfc_dmabuf *)pmb->ctx_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) 			kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) 		mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)  * This routine handles processing a Fabric REG_LOGIN mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909)  * command upon completion. It is set up in the LPFC_MBOXQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)  * as the completion routine when the command is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)  * handed off to the SLI layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) 	struct lpfc_vport *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) 	MAILBOX_t *mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) 	struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) 	ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) 	pmb->ctx_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) 	pmb->ctx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) 	if (mb->mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) 				 "0258 Register Fabric login error: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) 				 mb->mbxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) 		kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) 		mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) 			/* FLOGI failed, use loop map to make discovery list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) 			lpfc_disc_list_loopmap(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) 			/* Start discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) 			lpfc_disc_start(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) 			/* Decrement the ndlp reference count after all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) 			 * references to the ndlp are done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) 			lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) 		/* Decrement the ndlp reference count after all references
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) 		 * to the ndlp are done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) 		lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) 	if (phba->sli_rev < LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) 		ndlp->nlp_rpi = mb->un.varWords[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) 	ndlp->nlp_type |= NLP_FABRIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) 	if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) 		/* When the physical port receives a LOGO, do not start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) 		 * vport discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) 		if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) 			lpfc_start_fdiscs(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) 			shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) 			spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) 			vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) 			spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) 		lpfc_do_scr_ns_plogi(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) 	kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) 	/* Drop the reference count from the mbox at the end after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) 	 * all the current reference to the ndlp have been done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) 	lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986)  /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987)   * This routine will issue a GID_FT for each FC4 Type supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988)   * by the driver. ALL GID_FTs must complete before discovery is started.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989)   */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) lpfc_issue_gidft(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) 	/* Good status, issue CT Request to NameServer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) 	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) 	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) 		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_FCP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) 			/* Cannot issue NameServer FCP Query, so finish up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) 			 * discovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) 			lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) 					 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) 					 "0604 %s FC TYPE %x %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) 					 "Failed to issue GID_FT to ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) 					 FC_TYPE_FCP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) 					 "Finishing discovery.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) 		vport->gidft_inp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) 	if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) 	    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) 		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, SLI_CTPT_NVME)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) 			/* Cannot issue NameServer NVME Query, so finish up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) 			 * discovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) 			lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) 					 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) 					 "0605 %s FC_TYPE %x %s %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) 					 "Failed to issue GID_FT to ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) 					 FC_TYPE_NVME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) 					 "Finishing discovery: gidftinp ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) 					 vport->gidft_inp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) 			if (vport->gidft_inp == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) 			vport->gidft_inp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) 	return vport->gidft_inp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033)  * lpfc_issue_gidpt - issue a GID_PT for all N_Ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034)  * @vport: The virtual port for which this call is being executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036)  * This routine will issue a GID_PT to get a list of all N_Ports
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038)  * Return value :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)  *   0 - Failure to issue a GID_PT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040)  *   1 - GID_PT issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) lpfc_issue_gidpt(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) 	/* Good status, issue CT Request to NameServer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) 	if (lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, 0, GID_PT_N_PORT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) 		/* Cannot issue NameServer FCP Query, so finish up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) 		 * discovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) 				 "0606 %s Port TYPE %x %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) 				 "Failed to issue GID_PT to ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) 				 GID_PT_N_PORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) 				 "Finishing discovery.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) 	vport->gidft_inp++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062)  * This routine handles processing a NameServer REG_LOGIN mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063)  * command upon completion. It is set up in the LPFC_MBOXQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064)  * as the completion routine when the command is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065)  * handed off to the SLI layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) 	MAILBOX_t *mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 	struct lpfc_vport *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 	pmb->ctx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) 	pmb->ctx_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) 	vport->gidft_inp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) 	if (mb->mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) 				 "0260 Register NameServer error: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) 				 mb->mbxStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 		/* decrement the node reference count held for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) 		 * callback function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) 		lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) 		kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) 		mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) 		/* If no other thread is using the ndlp, free it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) 		lpfc_nlp_not_used(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) 			 * RegLogin failed, use loop map to make discovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) 			 * list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) 			lpfc_disc_list_loopmap(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) 			/* Start discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) 			lpfc_disc_start(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) 		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) 	if (phba->sli_rev < LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) 		ndlp->nlp_rpi = mb->un.varWords[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) 	ndlp->nlp_type |= NLP_FABRIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) 			 "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) 			 kref_read(&ndlp->kref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) 			 ndlp->nlp_usg_map, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) 	if (vport->port_state < LPFC_VPORT_READY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) 		/* Link up discovery requires Fabric registration. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) 		lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) 		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) 		lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) 		lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) 		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) 		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) 			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_FCP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) 		if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) 		    (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) 			lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 				    FC_TYPE_NVME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 		/* Issue SCR just before NameServer GID_FT Query */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) 		lpfc_issue_els_scr(vport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) 		lpfc_issue_els_rdf(vport, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 	vport->fc_ns_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 	if (lpfc_issue_gidft(vport) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) 	 * At this point in time we may need to wait for multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) 	 * SLI_CTNS_GID_FT CT commands to complete before we start discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) 	 * decrement the node reference count held for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) 	 * callback function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) 	lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) 
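/*
 * Register an ndlp with the FC transport as a remote port. Any rport
 * left over from a previous registration is unlinked and its reference
 * dropped first; the new rport's roles are then derived from the node's
 * FCP/NVME type bits. Skipped entirely for NVME-only configurations.
 */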
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) 	struct fc_rport  *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) 	struct lpfc_rport_data *rdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) 	struct fc_rport_identifiers rport_ids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) 	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) 	/* Remote port has reappeared. Re-register w/ FC transport */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) 	rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) 	rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) 	rport_ids.port_id = ndlp->nlp_DID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) 	 * We leave our node pointer in rport->dd_data when we unregister a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) 	 * FCP target port.  But fc_remote_port_add zeros the space to which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) 	 * rport->dd_data points.  So, if we're reusing a previously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) 	 * registered port, drop the reference that we took the last time we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) 	 * registered the port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) 	rport = ndlp->rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) 	if (rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) 		rdata = rport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) 		/* break the link before dropping the ref */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) 		ndlp->rport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) 		if (rdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) 			if (rdata->pnode == ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) 				lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) 			rdata->pnode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) 		/* drop reference for earlier registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) 		put_device(&rport->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 		"rport add:       did:x%x flg:x%x type x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 	/* Don't add the remote port if unloading. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 	if (vport->load_flag & FC_UNLOADING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) 	ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) 	if (!rport || !get_device(&rport->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) 		dev_printk(KERN_WARNING, &phba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) 			   "Warning: fc_remote_port_add failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) 	/* initialize static port data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) 	rport->maxframe_size = ndlp->nlp_maxframe;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) 	rport->supported_classes = ndlp->nlp_class_sup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) 	rdata = rport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) 	rdata->pnode = lpfc_nlp_get(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) 	if (ndlp->nlp_type & NLP_FCP_TARGET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) 		rport_ids.roles |= FC_PORT_ROLE_FCP_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) 	if (ndlp->nlp_type & NLP_FCP_INITIATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) 		rport_ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) 	if (ndlp->nlp_type & NLP_NVME_INITIATOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) 		rport_ids.roles |= FC_PORT_ROLE_NVME_INITIATOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) 	if (ndlp->nlp_type & NLP_NVME_TARGET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) 		rport_ids.roles |= FC_PORT_ROLE_NVME_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) 	if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) 		rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) 	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) 		fc_remote_port_rolechg(rport, rport_ids.roles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) 			 "3183 rport register x%06x, rport x%px role x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) 			 ndlp->nlp_DID, rport, rport_ids.roles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) 	if ((rport->scsi_target_id != -1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) 	    (rport->scsi_target_id < LPFC_MAX_TARGET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) 		ndlp->nlp_sid = rport->scsi_target_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) 
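/*
 * Remove an ndlp's remote port from the FC transport via
 * fc_remote_port_delete(). Skipped for NVME-only configurations.
 */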
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) 	struct fc_rport *rport = ndlp->rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) 	struct lpfc_vport *vport = ndlp->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) 	if (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 		"rport delete:    did:x%x flg:x%x type x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 			 "3184 rport unregister x%06x, rport x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 			 ndlp->nlp_DID, rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) 	fc_remote_port_delete(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) 
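/*
 * Adjust the per-state node counters on the vport by 'count' (typically
 * +1 or -1), holding the shost lock while the counters are updated.
 */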
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) 	spin_lock_irqsave(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) 	case NLP_STE_UNUSED_NODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) 		vport->fc_unused_cnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) 	case NLP_STE_PLOGI_ISSUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) 		vport->fc_plogi_cnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) 	case NLP_STE_ADISC_ISSUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) 		vport->fc_adisc_cnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) 	case NLP_STE_REG_LOGIN_ISSUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) 		vport->fc_reglogin_cnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) 	case NLP_STE_PRLI_ISSUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) 		vport->fc_prli_cnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) 	case NLP_STE_UNMAPPED_NODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) 		vport->fc_unmap_cnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) 	case NLP_STE_MAPPED_NODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) 		vport->fc_map_cnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) 	case NLP_STE_NPR_NODE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) 		if (vport->fc_npr_cnt == 0 && count == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) 			vport->fc_npr_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) 			vport->fc_npr_cnt += count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) 	spin_unlock_irqrestore(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) 
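/*
 * Handle the transport side of a node state change: unregister the FCP
 * rport and/or NVME remote port when a node leaves the MAPPED/UNMAPPED
 * states, register it when entering them, allocate the statistics buffer
 * for newly mapped FCP targets, and push nodes without a usable SCSI
 * target id back to the unmapped list.
 */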
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) 		       int old_state, int new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) 	if (new_state == NLP_STE_UNMAPPED_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) 		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) 		ndlp->nlp_type |= NLP_FC_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 	if (new_state == NLP_STE_MAPPED_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 		ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	if (new_state == NLP_STE_NPR_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 		ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) 	/* FCP and NVME Transport interface */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) 	if ((old_state == NLP_STE_MAPPED_NODE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) 	     old_state == NLP_STE_UNMAPPED_NODE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) 		if (ndlp->rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) 			vport->phba->nport_event_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) 			lpfc_unregister_remote_port(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) 		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) 			vport->phba->nport_event_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 			if (vport->phba->nvmet_support == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 				/* Start devloss if target. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 				if (ndlp->nlp_type & NLP_NVME_TARGET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 					lpfc_nvme_unregister_port(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 				/* NVMET has no upcall. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 				lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) 	/* FCP and NVME Transport interfaces */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) 	if (new_state ==  NLP_STE_MAPPED_NODE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) 	    new_state == NLP_STE_UNMAPPED_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) 		if (ndlp->nlp_fc4_type ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) 		    ndlp->nlp_DID == Fabric_DID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) 		    ndlp->nlp_DID == NameServer_DID ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) 		    ndlp->nlp_DID == FDMI_DID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) 			vport->phba->nport_event_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) 			 * Tell the fc transport about the port, if we haven't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) 			 * already. If we have, and it's a scsi entity, be sure to unblock any attached scsi devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) 			lpfc_register_remote_port(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) 		/* Notify the NVME transport of this new rport. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) 		if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) 		    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) 			if (vport->phba->nvmet_support == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) 				/* Register this rport with the transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) 				 * Only NVME Target Rports are registered with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) 				 * the transport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) 				if (ndlp->nlp_type & NLP_NVME_TARGET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) 					vport->phba->nport_event_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) 					lpfc_nvme_register_port(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) 				/* Just take an NDLP ref count since the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) 				 * target does not register rports.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) 				lpfc_nlp_get(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) 	if ((new_state ==  NLP_STE_MAPPED_NODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) 		(vport->stat_data_enabled)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) 		 * A new target is discovered; if there is no buffer for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) 		 * statistical data collection, allocate one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) 		ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) 					 sizeof(struct lpfc_scsicmd_bkt),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) 		if (!ndlp->lat_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) 				"0286 lpfc_nlp_state_cleanup failed to "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) 				"allocate statistical data buffer DID "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) 				"0x%x\n", ndlp->nlp_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) 	 * If the node just added to Mapped list was an FCP target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 	 * but the remote port registration failed or assigned a target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 	 * id outside the presentable range - move the node to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 	 * Unmapped List.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) 	if ((new_state == NLP_STE_MAPPED_NODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) 	    (ndlp->nlp_type & NLP_FCP_TARGET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) 	    (!ndlp->rport ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) 	     ndlp->rport->scsi_target_id == -1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) 	     ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) 		ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) 
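^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) /* Map an NLP node state to a short printable name for log messages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414)  * unknown states are rendered as "unknown (<state>)".
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414)  */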
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) static char *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) lpfc_nlp_state_name(char *buffer, size_t size, int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) 	static char *states[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) 		[NLP_STE_UNUSED_NODE] = "UNUSED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) 		[NLP_STE_PLOGI_ISSUE] = "PLOGI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) 		[NLP_STE_ADISC_ISSUE] = "ADISC",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) 		[NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) 		[NLP_STE_PRLI_ISSUE] = "PRLI",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) 		[NLP_STE_LOGO_ISSUE] = "LOGO",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) 		[NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) 		[NLP_STE_MAPPED_NODE] = "MAPPED",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) 		[NLP_STE_NPR_NODE] = "NPR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) 	if (state < NLP_STE_MAX_STATE && states[state])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) 		strlcpy(buffer, states[state], size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) 		snprintf(buffer, size, "unknown (%d)", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) 	return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) 
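^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) /* Transition a node to a new discovery state: log and trace the change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436)  * keep the per-state node counters in sync, link the node onto the vport's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436)  * fc_nodes list if it is not there yet, and run the state cleanup handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436)  * for the old/new state pair.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436)  */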
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) 		   int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) 	int  old_state = ndlp->nlp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) 	char name1[16], name2[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) 			 "0904 NPort state transition x%06x, %s -> %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) 			 ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) 			 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) 			 lpfc_nlp_state_name(name2, sizeof(name2), state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) 		"node statechg    did:x%x old:%d ste:%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) 		ndlp->nlp_DID, old_state, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) 	if (old_state == NLP_STE_NPR_NODE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) 	    state != NLP_STE_NPR_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) 		lpfc_cancel_retry_delay_tmo(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) 	if (old_state == NLP_STE_UNMAPPED_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) 		ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) 		ndlp->nlp_type &= ~NLP_FC_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) 	if (list_empty(&ndlp->nlp_listp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) 		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) 	} else if (old_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) 		lpfc_nlp_counters(vport, old_state, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) 	ndlp->nlp_state = state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) 	lpfc_nlp_counters(vport, state, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) 	lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) 
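^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) /* Add the node to the vport's fc_nodes list if it is not already listed. */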
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) 	if (list_empty(&ndlp->nlp_listp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) 		list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) 
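^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) /* Cancel any pending retry delay for the node, remove it from the vport's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486)  * fc_nodes list, fix up the per-state counters and run the cleanup for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486)  * transition to the UNUSED state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486)  */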
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) 	list_del_init(&ndlp->nlp_listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) 				NLP_STE_UNUSED_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) 
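^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) /* Same as lpfc_dequeue_node() except that the node is left on the vport's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501)  * fc_nodes list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501)  */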
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) 	if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) 		lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) 	lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) 				NLP_STE_UNUSED_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512)  * lpfc_initialize_node - Initialize all fields of node object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513)  * @vport: Pointer to Virtual Port object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514)  * @ndlp: Pointer to FC node object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515)  * @did: FC_ID of the node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517)  * This function is called whenever a node object needs to be initialized.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518)  * It initializes all the fields of the node object. Although the reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519)  * to phba from @ndlp can be obtained indirectly through its reference to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520)  * @vport, a direct reference to phba is taken here by @ndlp because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521)  * life-span of @ndlp may go beyond the existence of @vport: the final
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522)  * release of ndlp is determined by its reference count. Operations on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523)  * @ndlp therefore need the reference to phba.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) 	uint32_t did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) 	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) 	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) 	timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) 	INIT_LIST_HEAD(&ndlp->recovery_evt.evt_listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) 	ndlp->nlp_DID = did;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) 	ndlp->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) 	ndlp->phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) 	ndlp->nlp_sid = NLP_NO_SID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) 	ndlp->nlp_fc4_type = NLP_FC4_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) 	kref_init(&ndlp->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) 	NLP_INT_NODE_ACT(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) 	atomic_set(&ndlp->cmd_pending, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) 	ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) 	ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) 
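^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) /* Re-activate an existing node object: allocate or reuse an RPI on SLI4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)  * ports, zero the node except for its list linkage, reinitialize it while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)  * preserving the DID, deferred-unreg state and RRQ XRI bitmap, and move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)  * the node into the requested state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)  */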
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) 		 int state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) 	struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) 	uint32_t did, flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) 	unsigned long *active_rrqs_xri_bitmap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) 	int rpi = LPFC_RPI_ALLOC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) 	uint32_t defer_did = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) 	if (!ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) 	if (phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) 		if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) 			rpi = lpfc_sli4_alloc_rpi(vport->phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) 			rpi = ndlp->nlp_rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) 		if (rpi == LPFC_RPI_ALLOC_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) 					 "0359 %s: ndlp:x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) 					 "usgmap:x%x refcnt:%d FAILED RPI "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) 					 "ALLOC\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) 					 __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) 					 (void *)ndlp, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) 					 kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) 	spin_lock_irqsave(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) 	/* The ndlp should not be in memory free mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) 	if (NLP_CHK_FREE_REQ(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) 				"0277 %s: ndlp:x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) 				"usgmap:x%x refcnt:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) 				__func__, (void *)ndlp, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) 				kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) 		goto free_rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) 	/* The ndlp should not already be in active mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) 	if (NLP_CHK_NODE_ACT(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) 				"0278 %s: ndlp:x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) 				"usgmap:x%x refcnt:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 				__func__, (void *)ndlp, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 				kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 		goto free_rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 	/* First preserve the original DID, xri_bitmap and some flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 	did = ndlp->nlp_DID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) 	flag = (ndlp->nlp_flag & NLP_UNREG_INP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) 	if (flag & NLP_UNREG_INP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) 		defer_did = ndlp->nlp_defer_did;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) 	if (phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) 		active_rrqs_xri_bitmap = ndlp->active_rrqs_xri_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) 	/* Zero the ndlp except for its linked list pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) 	memset((((char *)ndlp) + sizeof (struct list_head)), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) 		sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) 	/* Next reinitialize and restore saved objects */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) 	lpfc_initialize_node(vport, ndlp, did);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) 	ndlp->nlp_flag |= flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) 	if (flag & NLP_UNREG_INP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) 		ndlp->nlp_defer_did = defer_did;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) 	if (phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) 		ndlp->active_rrqs_xri_bitmap = active_rrqs_xri_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) 		ndlp->nlp_rpi = rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) 				 "0008 rpi:%x DID:%x flg:%x refcnt:%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) 				 "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) 				 ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) 				 kref_read(&ndlp->kref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) 				 ndlp->nlp_usg_map, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) 	if (state != NLP_STE_UNUSED_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) 		lpfc_nlp_set_state(vport, ndlp, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) 				 "0013 rpi:%x DID:%x flg:%x refcnt:%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) 				 "map:%x x%px STATE=UNUSED\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) 				 ndlp->nlp_rpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) 				 ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) 				 kref_read(&ndlp->kref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) 				 ndlp->nlp_usg_map, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) 		"node enable:       did:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) 		ndlp->nlp_DID, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) 	return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) free_rpi:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) 	if (phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) 		lpfc_sli4_free_rpi(vport->phba, rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) 		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) 	 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) 	 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) 	 * the ndlp from the vport. The ndlp is marked as UNUSED on the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) 	 * until ALL other outstanding threads have completed. We check that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) 	 * the ndlp is not already in the UNUSED state before we proceed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) 	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) 		lpfc_cleanup_vports_rrqs(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) 		lpfc_unreg_rpi(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) 	lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679)  * Start / ReStart rescue timer for Discovery / RSCN handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) lpfc_set_disctmo(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) 	uint32_t tmo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) 	if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) 		/* For FAN, timeout should be greater than edtov */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) 		tmo = (((phba->fc_edtov + 999) / 1000) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) 		/* Normal discovery timeout should be greater than ELS/CT timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) 		 * FC spec states we need 3 * ratov for CT requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) 		tmo = ((phba->fc_ratov * 3) + 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) 	if (!timer_pending(&vport->fc_disctmo)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) 			"set disc timer:  tmo:x%x state:x%x flg:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) 			tmo, vport->port_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) 	mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) 	vport->fc_flag |= FC_DISC_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) 	/* Start Discovery Timer state <hba_state> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) 			 "0247 Start Discovery Timer state x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) 			 "Data: x%x x%lx x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) 			 vport->port_state, tmo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) 			 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) 			 vport->fc_adisc_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722)  * Cancel rescue timer for Discovery / RSCN handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) lpfc_can_disctmo(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) 		"can disc timer:  state:x%x rtry:x%x flg:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) 		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) 	/* Turn off discovery timer if it's running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) 	if (vport->fc_flag & FC_DISC_TMO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) 		spin_lock_irqsave(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) 		vport->fc_flag &= ~FC_DISC_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) 		spin_unlock_irqrestore(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) 		del_timer_sync(&vport->fc_disctmo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) 		spin_lock_irqsave(&vport->work_port_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) 		vport->work_port_events &= ~WORKER_DISC_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) 		spin_unlock_irqrestore(&vport->work_port_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) 	/* Cancel Discovery Timer state <hba_state> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) 			 "0248 Cancel Discovery Timer state x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) 			 "Data: x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) 			 vport->port_state, vport->fc_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) 			 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755)  * Check specified ring for outstanding IOCB on the SLI queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756)  * Return true if iocb matches the specified nport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) lpfc_check_sli_ndlp(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) 		    struct lpfc_sli_ring *pring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) 		    struct lpfc_iocbq *iocb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) 		    struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) 	IOCB_t *icmd = &iocb->iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) 	struct lpfc_vport    *vport = ndlp->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) 	if (iocb->vport != vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) 	if (pring->ringno == LPFC_ELS_RING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) 		switch (icmd->ulpCommand) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) 		case CMD_GEN_REQUEST64_CR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) 			if (iocb->context_un.ndlp == ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) 		case CMD_ELS_REQUEST64_CR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) 			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) 		case CMD_XMIT_ELS_RSP64_CX:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) 			if (iocb->context1 == (uint8_t *) ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) 	} else if (pring->ringno == LPFC_FCP_RING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) 		/* Skip match check if waiting to relogin to FCP target */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) 		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) 		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) 		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) 
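^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) /* Move every iocb on the ring's txq that matches the given nport onto
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796)  * dequeue_list. The caller must hold the lock protecting the ring's txq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796)  */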
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) __lpfc_dequeue_nport_iocbs(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) 		struct lpfc_nodelist *ndlp, struct lpfc_sli_ring *pring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) 		struct list_head *dequeue_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) 	struct lpfc_iocbq *iocb, *next_iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) 		/* Check to see if iocb matches the nport */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) 		if (lpfc_check_sli_ndlp(phba, pring, iocb, ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) 			/* match, dequeue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) 			list_move_tail(&iocb->list, dequeue_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) 
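^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) /* SLI-3: collect the nport's pending txq iocbs from every ring. */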
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813) lpfc_sli3_dequeue_nport_iocbs(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) 		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) 	struct lpfc_sli *psli = &phba->sli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) 	uint32_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) 	for (i = 0; i < psli->num_rings; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) 		__lpfc_dequeue_nport_iocbs(phba, ndlp, &psli->sli3_ring[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) 						dequeue_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) 
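^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) /* SLI-4: collect the nport's pending txq iocbs from every WQ ring. */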
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) 		struct lpfc_nodelist *ndlp, struct list_head *dequeue_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) 	struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) 	struct lpfc_queue *qp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) 	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) 		pring = qp->pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) 		if (!pring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) 		spin_lock(&pring->ring_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) 		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) 		spin_unlock(&pring->ring_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846)  * Free resources / clean up outstanding I/Os
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847)  * associated with nlp_rpi in the LPFC_NODELIST entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) 	LIST_HEAD(completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) 	lpfc_fabric_abort_nport(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) 	 * Everything that matches on txcmplq will be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) 	 * by firmware with a no rpi error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) 	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) 		if (phba->sli_rev != LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) 			lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) 			lpfc_sli4_dequeue_nport_iocbs(phba, ndlp, &completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) 	/* Cancel all the IOCBs from the completions list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) 			      IOERR_SLI_ABORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875)  * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876)  * @phba: Pointer to HBA context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877)  * @pmb: Pointer to mailbox object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879)  * This function will issue an ELS LOGO command after completing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880)  * the UNREG_RPI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) 	struct lpfc_vport  *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) 	ndlp = (struct lpfc_nodelist *)(pmb->ctx_ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) 	if (!ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) 	lpfc_issue_els_logo(vport, ndlp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) 	/* Check to see if there are any deferred events to process */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) 	if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) 	    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) 				 "1434 UNREG cmpl deferred logo x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) 				 "on NPort x%x Data: x%x x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) 				 ndlp->nlp_rpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) 				 ndlp->nlp_defer_did, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) 		ndlp->nlp_flag &= ~NLP_UNREG_INP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) 		ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) 		lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) 		if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) 			lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) 			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) 			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) 		ndlp->nlp_flag &= ~NLP_UNREG_INP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917)  * Sets the mailbox completion handler to be used for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918)  * unreg_rpi command. The handler varies based on the state of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919)  * the port and what will be happening to the rpi next.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) 	struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927) 	if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) 		mbox->ctx_ndlp = ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) 		mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) 	} else if (phba->sli_rev == LPFC_SLI_REV4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) 		   (!(vport->load_flag & FC_UNLOADING)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) 		    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) 				      LPFC_SLI_INTF_IF_TYPE_2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) 		    (kref_read(&ndlp->kref) > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) 		mbox->ctx_ndlp = lpfc_nlp_get(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) 		mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) 		if (vport->load_flag & FC_UNLOADING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) 			if (phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) 				spin_lock_irqsave(&vport->phba->ndlp_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) 						  iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) 				ndlp->nlp_flag |= NLP_RELEASE_RPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) 				spin_unlock_irqrestore(&vport->phba->ndlp_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) 						       iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) 			lpfc_nlp_get(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) 		mbox->ctx_ndlp = ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955)  * Free rpi associated with LPFC_NODELIST entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956)  * This routine is called from lpfc_freenode(), when we are removing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957)  * a LPFC_NODELIST entry. It is also called if the driver initiates a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958)  * LOGO that completes successfully, and we are waiting to PLOGI back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959)  * to the remote NPort. In addition, it is called after we receive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960)  * an unsolicited ELS cmd, send back a rsp, the rsp completes, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961)  * we are waiting to PLOGI back to the remote NPort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) 	struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) 	LPFC_MBOXQ_t    *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) 	int rc, acc_plogi = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) 	uint16_t rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) 	if (ndlp->nlp_flag & NLP_RPI_REGISTERED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) 	    ndlp->nlp_flag & NLP_REG_LOGIN_SEND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) 		if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) 			lpfc_printf_vlog(vport, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) 					 LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) 					 "3366 RPI x%x needs to be "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) 					 "unregistered nlp_flag x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) 					 "did x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) 					 ndlp->nlp_rpi, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) 					 ndlp->nlp_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) 		/* If there is already an UNREG in progress for this ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) 		 * no need to queue up another one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) 		if (ndlp->nlp_flag & NLP_UNREG_INP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) 			lpfc_printf_vlog(vport, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) 					 LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) 					 "1436 unreg_rpi SKIP UNREG x%x on "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) 					 "NPort x%x deferred x%x  flg x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) 					 "Data: x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) 					 ndlp->nlp_rpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) 					 ndlp->nlp_defer_did,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) 					 ndlp->nlp_flag, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) 		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) 		if (mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) 			/* SLI4 ports require the physical rpi value. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) 			rpi = ndlp->nlp_rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) 			if (phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) 				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) 			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) 			mbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) 			lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) 			if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) 				 * accept PLOGIs after unreg_rpi_cmpl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) 				acc_plogi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) 			if (((ndlp->nlp_DID & Fabric_DID_MASK) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) 			    Fabric_DID_MASK) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) 			    (!(vport->fc_flag & FC_OFFLINE_MODE)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) 				ndlp->nlp_flag |= NLP_UNREG_INP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) 			lpfc_printf_vlog(vport, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) 					 LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) 					 "1433 unreg_rpi UNREG x%x on "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) 					 "NPort x%x deferred flg x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) 					 "Data:x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) 					 ndlp->nlp_rpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) 					 ndlp->nlp_flag, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) 			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) 			if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) 				mempool_free(mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) 				acc_plogi = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) 			lpfc_printf_vlog(vport, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) 					 LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) 					 "1444 Failed to allocate mempool "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) 					 "unreg_rpi UNREG x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) 					 "DID x%x, flag x%x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) 					 "ndlp x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) 					 ndlp->nlp_rpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) 					 ndlp->nlp_flag, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) 			/* Because mempool_alloc failed, we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) 			 * will issue a LOGO here and keep the rpi alive if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) 			 * not unloading.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) 			if (!(vport->load_flag & FC_UNLOADING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) 				ndlp->nlp_flag &= ~NLP_UNREG_INP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) 				lpfc_issue_els_logo(vport, ndlp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) 				ndlp->nlp_prev_state = ndlp->nlp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) 				lpfc_nlp_set_state(vport, ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) 						   NLP_STE_NPR_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) 		lpfc_no_rpi(phba, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) 		if (phba->sli_rev != LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) 			ndlp->nlp_rpi = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) 		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) 		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) 		if (acc_plogi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) 			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) 	ndlp->nlp_flag &= ~NLP_LOGO_ACC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069)  * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072)  * This routine is invoked to unregister all the currently registered RPIs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073)  * to the HBA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) 	struct lpfc_vport **vports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) 	struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) 	vports = lpfc_create_vport_work_array(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) 	if (!vports) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) 				"2884 Vport array allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) 		shost = lpfc_shost_from_vport(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) 			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) 				/* The mempool_alloc might sleep */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) 				spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) 				lpfc_unreg_rpi(vports[i], ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) 				spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) 	lpfc_destroy_vport_work_array(phba, vports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) 
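^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) /* Unregister all RPIs logged in on this vport. SLI-4 ports use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104)  * unreg-all helper; SLI-3 ports issue a single UNREG_LOGIN mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104)  * command covering all of the vport's RPIs and wait for it to complete.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104)  */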
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) lpfc_unreg_all_rpis(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) 	struct lpfc_hba  *phba  = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) 	LPFC_MBOXQ_t     *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) 	if (phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) 		lpfc_sli4_unreg_all_rpis(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) 	if (mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) 		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) 				 mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) 		mbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) 		mbox->ctx_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) 		if (rc != MBX_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) 			mempool_free(mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) 		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) 					 "1836 Could not issue "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) 					 "unreg_login(all_rpis) status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) 					 rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) 
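^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) /* Unregister the vport's default RPIs with an UNREG_DID mailbox command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135)  * this applies to SLI-3 (and earlier) ports only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135)  */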
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) lpfc_unreg_default_rpis(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) 	struct lpfc_hba  *phba  = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) 	LPFC_MBOXQ_t     *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) 	/* Unreg DID is an SLI3 operation. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) 	if (phba->sli_rev > LPFC_SLI_REV3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) 	if (mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) 		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) 			       mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) 		mbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) 		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) 		mbox->ctx_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) 		if (rc != MBX_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) 			mempool_free(mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) 		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) 			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) 					 "1815 Could not issue "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) 					 "unreg_did (default rpis) status %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) 					 rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167)  * Release the resources associated with an LPFC_NODELIST entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168)  * so that the entry itself can be freed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) 	LPFC_MBOXQ_t *mb, *nextmb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) 	struct lpfc_dmabuf *mp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) 	/* Cleanup node for NPort <nlp_DID> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) 			 "0900 Cleanup node for NPort x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) 			 "Data: x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) 			 ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) 			 ndlp->nlp_state, ndlp->nlp_rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) 	if (NLP_CHK_FREE_REQ(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) 				"0280 %s: ndlp:x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) 				"usgmap:x%x refcnt:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) 				__func__, (void *)ndlp, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) 				kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) 		lpfc_dequeue_node(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) 				"0281 %s: ndlp:x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) 				"usgmap:x%x refcnt:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) 				__func__, (void *)ndlp, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) 				kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) 		lpfc_disable_node(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) 	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) 	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) 	if ((mb = phba->sli.mbox_active)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) 		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) 		   (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) 			mb->ctx_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) 	/* Cleanup REG_LOGIN completions which are not yet processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) 	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) 			(mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) 			(ndlp != (struct lpfc_nodelist *)mb->ctx_ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) 		mb->ctx_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) 		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) 		   !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) 		    (ndlp == (struct lpfc_nodelist *)mb->ctx_ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) 			mp = (struct lpfc_dmabuf *)(mb->ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) 			if (mp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) 				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) 				kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) 			list_del(&mb->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) 			mempool_free(mb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) 			/* Do not invoke lpfc_nlp_put to decrement the ndlp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) 			 * reference count here, as we are in the middle of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) 			 * lpfc_nlp_release.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) 	lpfc_els_abort(phba, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) 	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) 	ndlp->nlp_last_elscmd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) 	del_timer_sync(&ndlp->nlp_delayfunc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) 	list_del_init(&ndlp->els_retry_evt.evt_listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) 	list_del_init(&ndlp->dev_loss_evt.evt_listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) 	list_del_init(&ndlp->recovery_evt.evt_listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) 	lpfc_cleanup_vports_rrqs(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) 	if (phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) 		ndlp->nlp_flag |= NLP_RELEASE_RPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) 	if (!lpfc_unreg_rpi(vport, ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) 		/* Clean up unregistered and non freed rpis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) 		if ((ndlp->nlp_flag & NLP_RELEASE_RPI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) 		    !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) 			lpfc_sli4_free_rpi(vport->phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) 					   ndlp->nlp_rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) 			spin_lock_irqsave(&vport->phba->ndlp_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) 					  iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) 			ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) 			ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) 			spin_unlock_irqrestore(&vport->phba->ndlp_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) 					       iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278)  * Check to see if we can free the nlp back to the freelist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279)  * If we are in the middle of using the nlp in the discovery state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)  * machine, defer the free till we reach the end of the state machine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) 	struct lpfc_rport_data *rdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) 	struct fc_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) 	LPFC_MBOXQ_t *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) 	lpfc_cancel_retry_delay_tmo(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) 	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) 	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) 	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) 	    phba->sli_rev != LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) 		/* For this case we need to cleanup the default rpi
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297) 		 * allocated by the firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) 		lpfc_printf_vlog(vport, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) 				 LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) 				 "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) 				 "ref %d map:x%x ndlp x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) 				 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) 				 kref_read(&ndlp->kref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) 				 ndlp->nlp_usg_map, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) 		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) 			!= NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) 			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) 			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) 			if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) 				mempool_free(mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) 				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) 				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) 				mbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) 				mbox->ctx_ndlp = ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) 				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) 				if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) 					mempool_free(mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) 	lpfc_cleanup_node(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) 	 * ndlp->rport must be set to NULL before it reaches here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) 	 * i.e. break rport/node link before doing lpfc_nlp_put for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) 	 * registered rport and then drop the reference of rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) 	if (ndlp->rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) 		 * extra lpfc_nlp_put dropped the reference of ndlp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) 		 * for registered rport so need to cleanup rport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) 				"0940 removed node x%px DID x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) 				"rpi %d rport not null x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) 				 ndlp, ndlp->nlp_DID, ndlp->nlp_rpi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) 				 ndlp->rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) 		rport = ndlp->rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) 		rdata = rport->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) 		rdata->pnode = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) 		ndlp->rport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) 		put_device(&rport->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) 
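/*
 * Return 1 if the node's DID matches the given did, either directly or via
 * the private-loop cases where the area/domain bytes of one side are zero;
 * return 0 otherwise.  Broadcast DIDs never match.
 */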
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) 	      uint32_t did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) 	D_ID mydid, ndlpdid, matchdid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) 	if (did == Bcast_DID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) 	/* First check for a direct match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) 	if (ndlp->nlp_DID == did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) 	/* Next, check for an area/domain == 0 match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) 	mydid.un.word = vport->fc_myDID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) 	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) 	matchdid.un.word = did;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) 	ndlpdid.un.word = ndlp->nlp_DID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) 	if (matchdid.un.b.id == ndlpdid.un.b.id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372) 		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) 		    (mydid.un.b.area == matchdid.un.b.area)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) 			/* This code is supposed to match the ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) 			 * for a private loop device that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) 			 * connected to an fl_port. But we need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) 			 * check that the port did not just go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) 			 * from pt2pt to fabric or we could end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) 			 * up matching ndlp->nlp_DID 000001 to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) 			 * fabric DID 0x20101
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) 			if ((ndlpdid.un.b.domain == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) 			    (ndlpdid.un.b.area == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) 				if (ndlpdid.un.b.id &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) 				    vport->phba->fc_topology ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) 				    LPFC_TOPOLOGY_LOOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) 					return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) 		matchdid.un.word = ndlp->nlp_DID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) 		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) 		    (mydid.un.b.area == ndlpdid.un.b.area)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) 			if ((matchdid.un.b.domain == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) 			    (matchdid.un.b.area == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) 				if (matchdid.un.b.id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) 					return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) /* Search for a nodelist entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) static struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) 	uint32_t data1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) 		if (lpfc_matchdid(vport, ndlp, did)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) 			data1 = (((uint32_t)ndlp->nlp_state << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) 				 ((uint32_t)ndlp->nlp_xri << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) 				 ((uint32_t)ndlp->nlp_type << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) 				 ((uint32_t)ndlp->nlp_usg_map & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) 					 "0929 FIND node DID "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) 					 "Data: x%px x%x x%x x%x x%x x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) 					 ndlp, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) 					 ndlp->nlp_flag, data1, ndlp->nlp_rpi,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) 					 ndlp->active_rrqs_xri_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) 			return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) 	/* FIND node did <did> NOT FOUND */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) 			 "0932 FIND node did x%x NOT FOUND.\n", did);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) 
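/* Locked wrapper around __lpfc_findnode_did(): search the vport's node list
 * for a DID match while holding the SCSI host lock.
 */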
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) 	spin_lock_irqsave(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) 	ndlp = __lpfc_findnode_did(vport, did);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) 	spin_unlock_irqrestore(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) 	return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) 
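/*
 * Find the first node on the vport's list that is in MAPPED or UNMAPPED
 * state.  Returns the node, or NULL if no such node exists.
 */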
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) lpfc_findnode_mapped(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) 	uint32_t data1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) 	spin_lock_irqsave(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) 		if (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) 		    ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) 			data1 = (((uint32_t)ndlp->nlp_state << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) 				 ((uint32_t)ndlp->nlp_xri << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) 				 ((uint32_t)ndlp->nlp_type << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) 				 ((uint32_t)ndlp->nlp_rpi & 0xff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) 			spin_unlock_irqrestore(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) 					 "2025 FIND node DID "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) 					 "Data: x%px x%x x%x x%x x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) 					 ndlp, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) 					 ndlp->nlp_flag, data1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) 					 ndlp->active_rrqs_xri_bitmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) 			return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) 	spin_unlock_irqrestore(shost->host_lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) 	/* FIND mapped node NOT FOUND */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) 			 "2026 FIND mapped did NOT FOUND.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) 
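/*
 * Set up a node for discovery.  Allocates (or re-enables) a node for the
 * given DID when none is usable, otherwise moves an existing node to NPR
 * state and marks it NLP_NPR_2B_DISC, subject to RSCN filtering and NVME
 * target-mode restrictions.  Returns the node to discover, or NULL when no
 * discovery is needed.
 */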
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) 	ndlp = lpfc_findnode_did(vport, did);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) 	if (!ndlp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) 		if (vport->phba->nvmet_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) 		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) 		    lpfc_rscn_payload_check(vport, did) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) 		ndlp = lpfc_nlp_init(vport, did);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) 		if (!ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) 				 "6453 Setup New Node 2B_DISC x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) 				 "Data:x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) 				 ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) 				 ndlp->nlp_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) 		return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) 	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) 		if (vport->phba->nvmet_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) 		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514) 		if (!ndlp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) 					 "0014 Could not enable ndlp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) 				 "6454 Setup Enabled Node 2B_DISC x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) 				 "Data:x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) 				 ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) 				 ndlp->nlp_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) 		return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531) 	/* The NVME Target does not want to actively manage an rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) 	 * The goal is to allow the target to reset its state and clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) 	 * pending IO in preparation for the initiator to recover.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) 	if ((vport->fc_flag & FC_RSCN_MODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536) 	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) 		if (lpfc_rscn_payload_check(vport, did)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) 			/* Since this node is marked for discovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) 			 * delay timeout is not needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) 			lpfc_cancel_retry_delay_tmo(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) 					 "6455 Setup RSCN Node 2B_DISC x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) 					 "Data:x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) 					 ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) 					 ndlp->nlp_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) 			/* NVME Target mode waits until rport is known to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) 			 * impacted by the RSCN before it transitions.  No
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) 			 * active management - just go to NPR provided the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) 			 * node had a valid login.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) 			if (vport->phba->nvmet_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) 				return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) 			/* If we've already received a PLOGI from this NPort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) 			 * we don't need to try to discover it again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) 			if (ndlp->nlp_flag & NLP_RCV_PLOGI &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) 			    !(ndlp->nlp_type &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) 			     (NLP_FCP_TARGET | NLP_NVME_TARGET)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) 			ndlp->nlp_prev_state = ndlp->nlp_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) 			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) 			spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) 			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) 			spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) 			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) 					 "6456 Skip Setup RSCN Node x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) 					 "Data:x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) 					 ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) 					 ndlp->nlp_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) 			ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) 				 "6457 Setup Active Node 2B_DISC x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) 				 "Data:x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) 				 ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) 				 ndlp->nlp_state, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) 		/* If the initiator received a PLOGI from this NPort or if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) 		 * initiator is already in the process of discovery on it,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) 		 * there's no need to try to discover it again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) 		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) 		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) 		    (!vport->phba->nvmet_support &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) 		     ndlp->nlp_flag & NLP_RCV_PLOGI))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) 		if (vport->phba->nvmet_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) 			return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) 		/* Moving to NPR state clears unsolicited flags and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) 		 * allows for rediscovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) 		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) 		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) 	return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) /* Build a list of nodes to discover based on the loopmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) lpfc_disc_list_loopmap(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) 	uint32_t alpa, index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) 	if (!lpfc_is_link_up(phba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) 	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) 	/* Check whether a loop map is present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) 	if (phba->alpa_map[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) 		for (j = 1; j <= phba->alpa_map[0]; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) 			alpa = phba->alpa_map[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) 			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) 			lpfc_setup_disc_node(vport, alpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) 		/* No ALPA map, so try all ALPAs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) 		for (j = 0; j < FC_MAXLOOP; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) 			/* If cfg_scan_down is set, start from highest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) 			 * ALPA (0xef) to lowest (0x1).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) 			if (vport->cfg_scan_down)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) 				index = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) 				index = FC_MAXLOOP - j - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) 			alpa = lpfcAlpaArray[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) 			if ((vport->fc_myDID & 0xff) == alpa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) 			lpfc_setup_disc_node(vport, alpa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) /* SLI3 only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) 	LPFC_MBOXQ_t *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) 	struct lpfc_sli *psli = &phba->sli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) 	struct lpfc_sli_ring *extra_ring = &psli->sli3_ring[LPFC_EXTRA_RING];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) 	struct lpfc_sli_ring *fcp_ring   = &psli->sli3_ring[LPFC_FCP_RING];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) 	int  rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) 	 * If it's not a physical port or if we already sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) 	 * clear_la, then don't send it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) 	if ((phba->link_state >= LPFC_CLEAR_LA) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668) 	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) 		(phba->sli_rev == LPFC_SLI_REV4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) 	/* Link up discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) 	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) 		phba->link_state = LPFC_CLEAR_LA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) 		lpfc_clear_la(phba, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) 		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) 		mbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) 		if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) 			mempool_free(mbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681) 			lpfc_disc_flush_list(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) 			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) 			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) 			phba->link_state = LPFC_HBA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) /* Reg_vpi to tell firmware to resume normal operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) 	LPFC_MBOXQ_t *regvpimbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) 	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) 	if (regvpimbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) 		lpfc_reg_vpi(vport, regvpimbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) 		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) 		regvpimbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) 		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) 					== MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) 			mempool_free(regvpimbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) /* Start Link up / RSCN discovery on NPR nodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) lpfc_disc_start(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713) 	uint32_t num_sent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) 	uint32_t clear_la_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) 	if (!lpfc_is_link_up(phba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) 				 "3315 Link is not up %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) 				 phba->link_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) 	if (phba->link_state == LPFC_CLEAR_LA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) 		clear_la_pending = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) 		clear_la_pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) 	if (vport->port_state < LPFC_VPORT_READY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) 		vport->port_state = LPFC_DISC_AUTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) 	lpfc_set_disctmo(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) 	vport->fc_prevDID = vport->fc_myDID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) 	vport->num_disc_nodes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) 	/* Start Discovery state <hba_state> */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) 			 "0202 Start Discovery port state x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) 			 "flg x%x Data: x%x x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) 			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) 			 vport->fc_adisc_cnt, vport->fc_npr_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) 	/* First do ADISCs - if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) 	num_sent = lpfc_els_disc_adisc(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) 	if (num_sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) 	/* Register the VPI for SLI3, NPIV only. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) 	    !(vport->fc_flag & FC_PT2PT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) 	    !(vport->fc_flag & FC_RSCN_MODE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) 	    (phba->sli_rev < LPFC_SLI_REV4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) 		lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) 		lpfc_issue_reg_vpi(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) 	 * For SLI2, we need to set port_state to READY and continue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) 	 * discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) 	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) 		/* If we get here, there is nothing to ADISC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) 		lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) 		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) 			vport->num_disc_nodes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) 			/* go thru NPR nodes and issue ELS PLOGIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) 			if (vport->fc_npr_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) 				lpfc_els_disc_plogi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) 			if (!vport->num_disc_nodes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) 				spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) 				vport->fc_flag &= ~FC_NDISC_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) 				spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) 				lpfc_can_disctmo(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) 		vport->port_state = LPFC_VPORT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) 		/* Next do PLOGIs - if any */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) 		num_sent = lpfc_els_disc_plogi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) 		if (num_sent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) 		if (vport->fc_flag & FC_RSCN_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) 			/* Check to see if more RSCNs came in while we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) 			 * were processing this one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) 			if ((vport->fc_rscn_id_cnt == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) 			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) 				spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) 				vport->fc_flag &= ~FC_RSCN_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) 				spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) 				lpfc_can_disctmo(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) 				lpfc_els_handle_rscn(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806)  *  Ignore completions for all IOCBs on the tx and txcmpl queues for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807)  *  ELS ring that match the specified nodelist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) 	LIST_HEAD(completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) 	IOCB_t     *icmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) 	struct lpfc_iocbq    *iocb, *next_iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) 	struct lpfc_sli_ring *pring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) 	pring = lpfc_phba_elsring(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) 	if (unlikely(!pring))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) 	/* Error out any iocbs on the txq or txcmplq that match this ndlp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) 	 * First check the txq.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826) 		if (iocb->context1 != ndlp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) 		icmd = &iocb->iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) 		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) 		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) 			list_move_tail(&iocb->list, &completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) 	/* Next check the txcmplq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) 		if (iocb->context1 != ndlp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) 		icmd = &iocb->iocb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) 		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) 		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) 	/* Cancel all the IOCBs from the completions list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) 			      IOERR_SLI_ABORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) 
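/*
 * Flush outstanding ELS traffic for every node on this vport that still has
 * a PLOGI or ADISC in progress.
 */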
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) lpfc_disc_flush_list(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) 	struct lpfc_nodelist *ndlp, *next_ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) 	struct lpfc_hba *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) 	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) 		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) 					 nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) 			if (!NLP_CHK_NODE_ACT(ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) 			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) 			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) 				lpfc_free_tx(phba, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) 
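/*
 * Tear down all discovery state for the vport: flush pending RSCNs,
 * outstanding ELS commands, and the discovery node list.
 */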
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) 	lpfc_els_flush_rscn(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) 	lpfc_els_flush_cmd(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) 	lpfc_disc_flush_list(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) /*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884)  * NAME:     lpfc_disc_timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886)  * FUNCTION: Fibre Channel driver discovery timeout routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888)  * EXECUTION ENVIRONMENT: interrupt only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890)  * CALLED FROM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891)  *      Timer function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893)  * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894)  *      none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) /*****************************************************************************/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) lpfc_disc_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) 	struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) 	struct lpfc_hba   *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) 	uint32_t tmo_posted;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) 	if (unlikely(!phba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) 	spin_lock_irqsave(&vport->work_port_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) 	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) 	if (!tmo_posted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) 		vport->work_port_events |= WORKER_DISC_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) 	spin_unlock_irqrestore(&vport->work_port_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) 	if (!tmo_posted)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) 		lpfc_worker_wake_up(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) 
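/* Worker-thread handler for the WORKER_DISC_TMO event posted by
 * lpfc_disc_timeout() above. It clears FC_DISC_TMO on the vport and takes a
 * recovery action based on vport->port_state (restart FLOGI/FDISC, retry the
 * NameServer query, flush the discovery list, and so on), then examines
 * phba->link_state to catch CLEAR_LA and other unexpected timeouts.
 */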
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) lpfc_disc_timeout_handler(struct lpfc_vport *vport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) 	struct lpfc_hba  *phba = vport->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924) 	struct lpfc_sli  *psli = &phba->sli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) 	struct lpfc_nodelist *ndlp, *next_ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) 	LPFC_MBOXQ_t *initlinkmbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927) 	int rc, clrlaerr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) 	if (!(vport->fc_flag & FC_DISC_TMO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) 	vport->fc_flag &= ~FC_DISC_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) 		"disc timeout:    state:x%x rtry:x%x flg:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) 		vport->port_state, vport->fc_ns_retry, vport->fc_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) 	switch (vport->port_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942) 	case LPFC_LOCAL_CFG_LINK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) 		 * port_state is identically LPFC_LOCAL_CFG_LINK while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) 		 * waiting for FAN timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948) 				 "0221 FAN timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) 		/* Start discovery by sending FLOGI, clean up old rpis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) 		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952) 					 nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) 			if (!NLP_CHK_NODE_ACT(ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) 			if (ndlp->nlp_state != NLP_STE_NPR_NODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) 			if (ndlp->nlp_type & NLP_FABRIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) 				/* Clean up the ndlp on Fabric connections */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) 				lpfc_drop_node(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961) 			} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) 				/* Fail outstanding IO now since device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) 				 * is marked for PLOGI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) 				lpfc_unreg_rpi(vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) 		if (vport->port_state != LPFC_FLOGI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) 			if (phba->sli_rev <= LPFC_SLI_REV3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) 				lpfc_initial_flogi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) 				lpfc_issue_init_vfi(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) 	case LPFC_FDISC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) 	case LPFC_FLOGI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) 	/* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) 		/* Initial FLOGI timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) 				 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) 				 "0222 Initial %s timeout\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) 				 vport->vpi ? "FDISC" : "FLOGI");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) 		/* Assume no Fabric and go on with discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) 		 * Check for outstanding ELS FLOGI to abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) 		/* FLOGI failed, so just use loop map to make discovery list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) 		lpfc_disc_list_loopmap(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) 		/* Start discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) 		lpfc_disc_start(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) 	case LPFC_FABRIC_CFG_LINK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) 	/* hba_state is identically LPFC_FABRIC_CFG_LINK while waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) 	   NameServer login */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) 				 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) 				 "0223 Timeout while waiting for "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) 				 "NameServer login\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004) 		/* Next look for NameServer ndlp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) 		ndlp = lpfc_findnode_did(vport, NameServer_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) 		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) 			lpfc_els_abort(phba, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) 		/* ReStart discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) 		goto restart_disc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) 	case LPFC_NS_QRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) 	/* Check for wait for NameServer Rsp timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) 				 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) 				 "0224 NameServer Query timeout "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017) 				 "Data: x%x x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) 				 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) 		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) 			/* Try it one more time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022) 			vport->fc_ns_retry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) 			vport->gidft_inp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) 			rc = lpfc_issue_gidft(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) 			if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) 		vport->fc_ns_retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) restart_disc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) 		 * Discovery is over.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) 		 * Set port_state to LPFC_VPORT_READY if SLI2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) 		 * cmpl_reg_vpi will set port_state to READY for SLI3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) 		if (phba->sli_rev < LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038) 				lpfc_issue_reg_vpi(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) 			else  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) 				lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) 				vport->port_state = LPFC_VPORT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) 		/* Setup and issue mailbox INITIALIZE LINK command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) 		initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) 		if (!initlinkmbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) 			lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) 					 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) 					 "0206 Device Discovery "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) 					 "completion error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) 			phba->link_state = LPFC_HBA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) 		lpfc_linkdown(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) 		lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) 			       phba->cfg_link_speed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) 		initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) 		initlinkmbox->vport = vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) 		initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) 		rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063) 		lpfc_set_loopback_flag(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) 		if (rc == MBX_NOT_FINISHED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) 			mempool_free(initlinkmbox, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069) 	case LPFC_DISC_AUTH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) 	/* Node Authentication timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) 				 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) 				 "0227 Node Authentication timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074) 		lpfc_disc_flush_list(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) 		 * Set port_state to LPFC_VPORT_READY if SLI2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078) 		 * cmpl_reg_vpi will set port_state to READY for SLI3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) 		if (phba->sli_rev < LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) 				lpfc_issue_reg_vpi(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) 			else  {	/* NPIV Not enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) 				lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) 				vport->port_state = LPFC_VPORT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) 	case LPFC_VPORT_READY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) 		if (vport->fc_flag & FC_RSCN_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) 			lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) 					 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) 					 "0231 RSCN timeout Data: x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) 					 "x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096) 					 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) 			/* Cleanup any outstanding ELS commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) 			lpfc_els_flush_cmd(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101) 			lpfc_els_flush_rscn(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) 			lpfc_disc_flush_list(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) 				 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) 				 "0273 Unexpected discovery timeout, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110) 				 "vport State x%x\n", vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) 	switch (phba->link_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) 	case LPFC_CLEAR_LA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) 				/* CLEAR LA timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) 				 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) 				 "0228 CLEAR LA timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) 		clrlaerr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) 	case LPFC_LINK_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) 		lpfc_issue_clear_la(phba, vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) 	case LPFC_LINK_UNKNOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) 	case LPFC_WARM_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) 	case LPFC_INIT_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) 	case LPFC_INIT_MBX_CMDS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) 	case LPFC_LINK_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131) 	case LPFC_HBA_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) 		lpfc_printf_vlog(vport, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) 				 LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) 				 "0230 Unexpected timeout, hba link "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) 				 "state x%x\n", phba->link_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) 		clrlaerr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) 	case LPFC_HBA_READY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) 	if (clrlaerr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) 		lpfc_disc_flush_list(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) 		if (phba->sli_rev != LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) 			psli->sli3_ring[(LPFC_EXTRA_RING)].flag &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) 				~LPFC_STOP_IOCB_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148) 			psli->sli3_ring[LPFC_FCP_RING].flag &=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) 				~LPFC_STOP_IOCB_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) 		vport->port_state = LPFC_VPORT_READY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157)  * This routine handles processing an FDMI REG_LOGIN mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158)  * command upon completion. It is set up in the LPFC_MBOXQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159)  * as the completion routine when the command is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160)  * handed off to the SLI layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163) lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) 	MAILBOX_t *mb = &pmb->u.mb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) 	struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) 	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)pmb->ctx_ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) 	struct lpfc_vport    *vport = pmb->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) 	pmb->ctx_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) 	pmb->ctx_ndlp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) 	if (phba->sli_rev < LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) 		ndlp->nlp_rpi = mb->un.varWords[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) 	ndlp->nlp_flag |= NLP_RPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) 	ndlp->nlp_type |= NLP_FABRIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) 			 "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) 			 ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) 			 kref_read(&ndlp->kref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) 			 ndlp->nlp_usg_map, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) 	 * Start issuing Fabric-Device Management Interface (FDMI) command to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) 	 * 0xfffffa (FDMI well known port).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) 	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) 	 * DPRT -> RPRT (vports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) 	if (vport->port_type == LPFC_PHYSICAL_PORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) 		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) 		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) 	/* decrement the node reference count held for this callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) 	 * function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) 	lpfc_nlp_put(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) 	kfree(mp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) 	mempool_free(pmb, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) 
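/* Node-list filter callbacks for __lpfc_find_node() below: match a node by
 * its RPI or by its WWPN. Each returns nonzero when the node matches the
 * supplied parameter.
 */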
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) 	uint16_t *rpi = param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) 	/* check for active node */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) 	if (!NLP_CHK_NODE_ACT(ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6213) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) 	return ndlp->nlp_rpi == *rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221) 	return memcmp(&ndlp->nlp_portname, param,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) 		      sizeof(ndlp->nlp_portname)) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) 
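/* Walk the vport's fc_nodes list and return the first node accepted by the
 * filter callback, or NULL if no node matches. Callers take the host lock
 * around this walk (see lpfc_findnode_wwpn() and lpfc_findnode_rpi() below).
 */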
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) static struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) 	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) 		if (filter(ndlp, param)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232) 			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) 					 "3185 FIND node filter %ps DID "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6234) 					 "ndlp x%px did x%x flg x%x st x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6235) 					 "xri x%x type x%x rpi x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) 					 filter, ndlp, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) 					 ndlp->nlp_flag, ndlp->nlp_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238) 					 ndlp->nlp_xri, ndlp->nlp_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) 					 ndlp->nlp_rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240) 			return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) 			 "3186 FIND node filter %ps NOT FOUND.\n", filter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249)  * This routine looks up the ndlp lists for the given RPI. If the RPI is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250)  * found, it returns the node list element pointer; otherwise it returns NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) 	return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259)  * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260)  * found, it returns the node list element pointer; otherwise it returns NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) 	ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271) 	return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275)  * This routine looks up the ndlp lists for the given RPI. If the RPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276)  * is found, the routine returns the node list element pointer; otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277)  * it returns NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280) lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) 	spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) 	ndlp = __lpfc_findnode_rpi(vport, rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) 	spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) 	return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293)  * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294)  * @phba: pointer to lpfc hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295)  * @vpi: the physical host virtual N_Port identifier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297)  * This routine finds a vport on a HBA (referred by @phba) through a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298)  * @vpi. The function walks the HBA's vport list and returns the address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299)  * of the vport with the matching @vpi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301)  * Return code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302)  *    NULL - No vport with the matching @vpi found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303)  *    Otherwise - Address to the vport with the matching @vpi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) struct lpfc_vport *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) 	struct lpfc_vport *vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) 	/* The physical ports are always vpi 0 - translation is unnecessary. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) 	if (vpi > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) 		 * Translate the physical vpi to the logical vpi.  The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) 		 * vport stores the logical vpi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) 		for (i = 0; i < phba->max_vpi; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) 			if (vpi == phba->vpi_ids[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323) 		if (i >= phba->max_vpi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) 					"2936 Could not find Vport mapped "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) 					"to vpi %d\n", vpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) 	spin_lock_irqsave(&phba->port_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) 	list_for_each_entry(vport, &phba->port_list, listentry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) 		if (vport->vpi == i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334) 			spin_unlock_irqrestore(&phba->port_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) 			return vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338) 	spin_unlock_irqrestore(&phba->port_list_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) 
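/* Allocate and initialize a new nodelist entry for the given DID. For SLI4,
 * an RPI is allocated first and the active RRQs XRI bitmap is set up; if the
 * nodelist allocation itself fails, the RPI is released and NULL is returned.
 */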
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) 	int rpi = LPFC_RPI_ALLOC_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349) 		rpi = lpfc_sli4_alloc_rpi(vport->phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) 		if (rpi == LPFC_RPI_ALLOC_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) 	ndlp = mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355) 	if (!ndlp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) 		if (vport->phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) 			lpfc_sli4_free_rpi(vport->phba, rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6359) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) 	memset(ndlp, 0, sizeof (struct lpfc_nodelist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) 	lpfc_initialize_node(vport, ndlp, did);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) 	INIT_LIST_HEAD(&ndlp->nlp_listp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) 	if (vport->phba->sli_rev == LPFC_SLI_REV4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) 		ndlp->nlp_rpi = rpi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) 				 "0007 Init New ndlp x%px, rpi:x%x DID:%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) 				 "flg:x%x refcnt:%d map:x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) 				 ndlp, ndlp->nlp_rpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) 				 ndlp->nlp_flag, kref_read(&ndlp->kref),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372) 				 ndlp->nlp_usg_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) 		ndlp->active_rrqs_xri_bitmap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) 				mempool_alloc(vport->phba->active_rrq_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) 		if (ndlp->active_rrqs_xri_bitmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) 			memset(ndlp->active_rrqs_xri_bitmap, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) 			       ndlp->phba->cfg_rrq_xri_bitmap_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) 		"node init:       did:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) 		ndlp->nlp_DID, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) 	return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) /* This routine releases all resources associated with a specific NPort's ndlp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392)  * and mempool_free's the nodelist.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) lpfc_nlp_release(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) 	struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) 	struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) 						  kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403) 		"node release:    did:x%x flg:x%x type:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) 		ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) 	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) 			"0279 %s: ndlp:x%px did %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) 			"usgmap:x%x refcnt:%d rpi:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) 			__func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) 			(void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) 			kref_read(&ndlp->kref), ndlp->nlp_rpi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) 	/* remove ndlp from action. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) 	lpfc_nlp_remove(ndlp->vport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) 	/* clear the ndlp active flag for all release cases */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417) 	phba = ndlp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) 	spin_lock_irqsave(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6419) 	NLP_CLR_NODE_ACT(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6420) 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) 	/* free ndlp memory for final ndlp release */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) 	if (NLP_CHK_FREE_REQ(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) 		kfree(ndlp->lat_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) 		if (phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) 			mempool_free(ndlp->active_rrqs_xri_bitmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) 				     ndlp->phba->active_rrq_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) 		mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) /* This routine bumps the reference count for a ndlp structure to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433)  * that one discovery thread won't free a ndlp while another discovery thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434)  * is using it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) struct lpfc_nodelist *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) lpfc_nlp_get(struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) 	struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) 	if (ndlp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) 		lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) 			"node get:        did:x%x flg:x%x refcnt:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445) 			ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) 			kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) 		/* Check ndlp usage to avoid incrementing the reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) 		 * count of an ndlp that is in the process of being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) 		 * released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) 		phba = ndlp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) 		spin_lock_irqsave(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) 		if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) 			spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) 			lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) 				"0276 %s: ndlp:x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) 				"usgmap:x%x refcnt:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) 				__func__, (void *)ndlp, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) 				kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) 			kref_get(&ndlp->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463) 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) 	return ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) /* This routine decrements the reference count for a ndlp structure. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469)  * count goes to 0, this indicates that the associated nodelist should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470)  * freed. Returning 1 indicates the ndlp resource has been released; on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471)  * other hand, returning 0 indicates the ndlp resource has not been released
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472)  * yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) lpfc_nlp_put(struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) 	struct lpfc_hba *phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) 	if (!ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) 			"node put:        did:x%x flg:x%x refcnt:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) 			ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) 			kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) 	phba = ndlp->phba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) 	spin_lock_irqsave(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) 	/* Check the ndlp memory free acknowledge flag to avoid the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) 	 * possible race condition that kref_put got invoked again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) 	 * after previous one has done ndlp memory free.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) 	if (NLP_CHK_FREE_ACK(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) 		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) 				"0274 %s: ndlp:x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) 				"usgmap:x%x refcnt:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) 				__func__, (void *)ndlp, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) 				kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) 	/* Check the ndlp inactivate log flag to avoid the possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) 	 * race condition that kref_put got invoked again after ndlp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) 	 * is already in inactivating state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) 	if (NLP_CHK_IACT_REQ(ndlp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) 		spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) 		lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) 				"0275 %s: ndlp:x%px "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) 				"usgmap:x%x refcnt:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) 				__func__, (void *)ndlp, ndlp->nlp_usg_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) 				kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6513) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6514) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) 	/* For last put, mark the ndlp usage flags to make sure no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) 	 * other kref_get and kref_put on the same ndlp shall get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) 	 * in between the process when the final kref_put has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) 	 * invoked on this ndlp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520) 	if (kref_read(&ndlp->kref) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) 		/* Indicate ndlp is put to inactive state. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) 		NLP_SET_IACT_REQ(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) 		/* Acknowledge ndlp memory free has been seen. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) 		if (NLP_CHK_FREE_REQ(ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) 			NLP_SET_FREE_ACK(ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) 	spin_unlock_irqrestore(&phba->ndlp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528) 	/* Note: kref_put returns 1 when decrementing a reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6529) 	 * count that was 1; it invokes the release callback function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6530) 	 * but still leaves the reference count reported as 1 (it does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) 	 * not actually perform the last decrement). Otherwise, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) 	 * decrements the reference count and returns 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) 	return kref_put(&ndlp->kref, lpfc_nlp_release);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) }
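/* Illustrative sketch (not taken verbatim from this file) of how the
 * reference counting above is typically paired: a caller that hands an ndlp
 * to an asynchronous context takes a reference first, and the completion
 * path drops it, e.g.
 *
 *	pmb->ctx_ndlp = lpfc_nlp_get(ndlp);	// hold the node for the cmpl
 *	...
 *	lpfc_nlp_put(ndlp);			// released in the completion
 *
 * as done for the mailbox completion in lpfc_mbx_cmpl_fdmi_reg_login()
 * earlier in this file.
 */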
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) /* This routine frees the specified nodelist if it is not in use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538)  * by any other discovery thread. This routine returns 1 if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539)  * ndlp has been freed. A return value of 0 indicates the ndlp has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540)  * not yet been released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) 	lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) 		"node not used:   did:x%x flg:x%x refcnt:x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) 		ndlp->nlp_DID, ndlp->nlp_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) 		kref_read(&ndlp->kref));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) 	if (kref_read(&ndlp->kref) == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) 		if (lpfc_nlp_put(ndlp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556)  * lpfc_fcf_inuse - Check if FCF can be unregistered.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559)  * This function iterates through all FC nodes associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560)  * with all vports to check if there is any node with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561)  * fc_rports associated with it. If there is an fc_rport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562)  * associated with the node, then the node is either in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563)  * discovered state or its devloss_timer is pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6564)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6565) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) lpfc_fcf_inuse(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) 	struct lpfc_vport **vports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) 	int i, ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) 	struct Scsi_Host  *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) 	vports = lpfc_create_vport_work_array(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) 	/* If driver cannot allocate memory, indicate fcf is in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) 	if (!vports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579) 	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) 		shost = lpfc_shost_from_vport(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) 		 * If the CVL_RCVD bit is not set then we have sent the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) 		 * FLOGI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) 		 * If dev_loss fires while we are waiting we do not want to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) 		 * unreg the fcf.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) 		if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) 			spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) 			ret =  1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) 			if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595) 			  (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) 				ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) 				spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) 				goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) 			} else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) 				ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) 				lpfc_printf_log(phba, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) 						LOG_NODE | LOG_DISCOVERY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) 						"2624 RPI %x DID %x flag %x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604) 						"still logged in\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) 						ndlp->nlp_rpi, ndlp->nlp_DID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) 						ndlp->nlp_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) 	lpfc_destroy_vport_work_array(phba, vports);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617)  * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619)  * @mboxq: Pointer to mailbox object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621)  * This function frees memory associated with the mailbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) 	struct lpfc_vport *vport = mboxq->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) 	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) 	if (mboxq->u.mb.mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6631) 				"2555 UNREG_VFI mbxStatus error x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6632) 				"HBA state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) 				mboxq->u.mb.mbxStatus, vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) 	spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) 	phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) 	spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) 	mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643)  * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645)  * @mboxq: Pointer to mailbox object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647)  * This function frees memory associated with the mailbox command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652) 	struct lpfc_vport *vport = mboxq->vport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654) 	if (mboxq->u.mb.mbxStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) 				"2550 UNREG_FCFI mbxStatus error x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) 				"HBA state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6658) 				mboxq->u.mb.mbxStatus, vport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) 	mempool_free(mboxq, phba->mbox_mem_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6661) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6664) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6665)  * lpfc_unregister_fcf_prep - Unregister fcf record preparation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668)  * This function prepares the HBA for unregistering the currently registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669)  * FCF from the HBA. It unregisters, in order, the RPIs, the VPIs, and then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670)  * the VFI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673) lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) 	struct lpfc_vport **vports;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) 	struct lpfc_nodelist *ndlp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) 	struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) 	int i = 0, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) 	/* Unregister RPIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) 	if (lpfc_fcf_inuse(phba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) 		lpfc_unreg_hba_rpis(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) 	/* At this point, all discovery is aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685) 	phba->pport->port_state = LPFC_VPORT_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) 	/* Unregister VPIs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) 	vports = lpfc_create_vport_work_array(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) 	if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) 		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) 			/* Stop FLOGI/FDISC retries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) 			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) 			if (ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) 				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) 			lpfc_cleanup_pending_mbox(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) 			if (phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) 				lpfc_sli4_unreg_all_rpis(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) 			lpfc_mbx_unreg_vpi(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) 			shost = lpfc_shost_from_vport(vports[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) 			spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) 			vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) 			vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) 			spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) 	lpfc_destroy_vport_work_array(phba, vports);
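	/*
	 * If NPIV is not enabled and no vports were walked above, perform the
	 * same FLOGI cancel, mailbox cleanup, and VPI unregister directly on
	 * the physical port.
	 */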
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) 	if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) 		ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) 		if (ndlp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) 			lpfc_cancel_retry_delay_tmo(phba->pport, ndlp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) 		lpfc_cleanup_pending_mbox(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) 		if (phba->sli_rev == LPFC_SLI_REV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) 			lpfc_sli4_unreg_all_rpis(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) 		lpfc_mbx_unreg_vpi(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6714) 		shost = lpfc_shost_from_vport(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6715) 		spin_lock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6716) 		phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) 		phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) 		spin_unlock_irq(shost->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) 	/* Cleanup any outstanding ELS commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) 	lpfc_els_flush_all_cmd(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) 	/* Unregister the physical port VFI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) 	rc = lpfc_issue_unreg_vfi(phba->pport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730)  * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733)  * This function issues an unregister FCF mailbox command to the HBA to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734)  * unregister the currently registered FCF record. The driver does not reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735)  * its FCF usage state flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737)  * Return 0 if successfully issued, non-zero otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) 	LPFC_MBOXQ_t *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) 	if (!mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) 				"2551 UNREG_FCFI mbox allocation failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) 				"HBA state x%x\n", phba->pport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) 	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) 	mbox->vport = phba->pport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) 	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) 	if (rc == MBX_NOT_FINISHED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) 				"2552 Unregister FCFI command failed rc x%x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) 				"HBA state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) 				rc, phba->pport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768)  * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771)  * This function unregisters the currently registered FCF. It also tries to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772)  * find another FCF for discovery by rescanning the HBA FCF table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) 	/* Preparation for unregistering fcf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) 	rc = lpfc_unregister_fcf_prep(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) 				"2748 Failed to prepare for unregistering "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) 				"HBA's FCF record: rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) 	/* Now, unregister FCF record and reset HBA FCF state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) 	rc = lpfc_sli4_unregister_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6792) 	/* Reset HBA FCF states after successful unregister FCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) 	phba->fcf.fcf_flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) 	phba->fcf.current_rec.flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) 	 * If the driver is not unloading and the link is still up, check if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) 	 * there is any other FCF record that can be used for discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) 	if ((phba->pport->load_flag & FC_UNLOADING) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) 	    (phba->link_state < LPFC_LINK_UP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6802) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) 	/* This is considered as the initial FCF discovery scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) 	phba->fcf.fcf_flag |= FCF_INIT_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) 	/* Reset FCF roundrobin bmask for new discovery */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) 	lpfc_sli4_clear_fcf_rr_bmask(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6812) 	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) 		spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) 		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) 				"2553 lpfc_unregister_unused_fcf failed "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) 				"to read FCF record HBA state x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) 				phba->pport->port_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826)  * lpfc_unregister_fcf - Unregister the currently registered fcf record
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6827)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829)  * This function just unregisters the currently registered FCF. It does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830)  * try to find another FCF for discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6831)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) lpfc_unregister_fcf(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) 	/* Preparation for unregistering fcf */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) 	rc = lpfc_unregister_fcf_prep(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) 				"2749 Failed to prepare for unregistering "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) 				"HBA's FCF record: rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) 	/* Now, unregister FCF record and reset HBA FCF state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) 	rc = lpfc_sli4_unregister_fcf(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) 	/* Set proper HBA FCF states after successful unregister FCF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) 	phba->fcf.fcf_flag &= ~FCF_REGISTERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857)  * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860)  * This function checks whether any remote ports are still connected through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861)  * the FCF and, if all devices are disconnected, unregisters the FCFI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862)  * It also tries to use another FCF for discovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) 	 * If HBA is not running in FIP mode, if HBA does not support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) 	 * FCoE, if FCF discovery is ongoing, or if FCF has not been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) 	 * registered, do nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) 	spin_lock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) 	if (!(phba->hba_flag & HBA_FCOE_MODE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) 	    !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875) 	    !(phba->hba_flag & HBA_FIP_SUPPORT) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) 	    (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) 	    (phba->pport->port_state == LPFC_FLOGI)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) 		spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) 	spin_unlock_irq(&phba->hbalock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) 
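	/* Leave the FCF registered while any remote node is still using it. */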
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) 	if (lpfc_fcf_inuse(phba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) 	lpfc_unregister_fcf_rescan(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890)  * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892)  * @buff: Buffer containing the FCF connection table from config region 23.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894)  * This function creates the driver data structures for the FCF connection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895)  * record table read from config region 23.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) 	uint8_t *buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) 	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) 	struct lpfc_fcf_conn_hdr *conn_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903) 	struct lpfc_fcf_conn_rec *conn_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) 	uint32_t record_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) 	/* Free the current connect table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) 	list_for_each_entry_safe(conn_entry, next_conn_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) 		&phba->fcf_conn_rec_list, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) 		list_del_init(&conn_entry->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911) 		kfree(conn_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) 	conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
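	/* The header length is given in 32-bit words; convert it to a record count. */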
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) 	record_count = conn_hdr->length * sizeof(uint32_t)/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916) 		sizeof(struct lpfc_fcf_conn_rec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) 	conn_rec = (struct lpfc_fcf_conn_rec *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) 		(buff + sizeof(struct lpfc_fcf_conn_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) 	for (i = 0; i < record_count; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922) 		if (!(conn_rec[i].flags & FCFCNCT_VALID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) 		conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) 		if (!conn_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) 					"2566 Failed to allocate connection"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) 					" table entry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) 		memcpy(&conn_entry->conn_rec, &conn_rec[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) 			sizeof(struct lpfc_fcf_conn_rec));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) 		list_add_tail(&conn_entry->list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) 			&phba->fcf_conn_rec_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) 
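	/* Dump the newly built connection table for debugging. */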
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) 	if (!list_empty(&phba->fcf_conn_rec_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) 		i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) 		list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942) 				    list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) 			conn_rec = &conn_entry->conn_rec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) 					"3345 FCF connection list rec[%02d]: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) 					"flags:x%04x, vtag:x%04x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) 					"fabric_name:x%02x:%02x:%02x:%02x:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) 					"%02x:%02x:%02x:%02x, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949) 					"switch_name:x%02x:%02x:%02x:%02x:"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) 					"%02x:%02x:%02x:%02x\n", i++,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) 					conn_rec->flags, conn_rec->vlan_tag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) 					conn_rec->fabric_name[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) 					conn_rec->fabric_name[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) 					conn_rec->fabric_name[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) 					conn_rec->fabric_name[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956) 					conn_rec->fabric_name[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6957) 					conn_rec->fabric_name[5],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6958) 					conn_rec->fabric_name[6],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6959) 					conn_rec->fabric_name[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6960) 					conn_rec->switch_name[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6961) 					conn_rec->switch_name[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) 					conn_rec->switch_name[2],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) 					conn_rec->switch_name[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) 					conn_rec->switch_name[4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) 					conn_rec->switch_name[5],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966) 					conn_rec->switch_name[6],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) 					conn_rec->switch_name[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973)  * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974)  * @phba: Pointer to hba context object.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975)  * @buff: Buffer containing the FCoE parameter data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6976)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6977)  * This function updates the driver data structure with config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6978)  * parameters read from config region 23.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6979)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6980) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) lpfc_read_fcoe_param(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) 			uint8_t *buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) 	struct lpfc_fip_param_hdr *fcoe_param_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985) 	struct lpfc_fcoe_params *fcoe_param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) 	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) 		buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) 	fcoe_param = (struct lpfc_fcoe_params *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) 		(buff + sizeof(struct lpfc_fip_param_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) 	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) 		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) 
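	/* The VLAN ID occupies the low 12 bits of the tag. */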
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) 	if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) 		phba->valid_vlan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) 		phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) 			0xFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) 	phba->fc_map[0] = fcoe_param->fc_map[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) 	phba->fc_map[1] = fcoe_param->fc_map[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) 	phba->fc_map[2] = fcoe_param->fc_map[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009)  * lpfc_get_rec_conf23 - Get a record type in config region data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010)  * @buff: Buffer containing config region 23 data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011)  * @size: Size of the data buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012)  * @rec_type: Record type to be searched.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014)  * This function searches the config region data to find the beginning of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015)  * record specified by @rec_type. If the record is found, this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016)  * returns a pointer to the record; otherwise it returns NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) static uint8_t *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021) 	uint32_t offset = 0, rec_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) 	if ((buff[0] == LPFC_REGION23_LAST_REC) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) 		(size < sizeof(uint32_t)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) 
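	/* Byte 0 of a record header holds the type; byte 1 holds its length in words. */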
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027) 	rec_length = buff[offset + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) 	 * One TLV record has one word header and number of data words
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) 	 * specified in the rec_length field of the record header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) 	while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) 		<= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) 		if (buff[offset] == rec_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) 			return &buff[offset];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) 		if (buff[offset] == LPFC_REGION23_LAST_REC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) 		offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) 		rec_length = buff[offset + 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) }
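
/*
 * A minimal, self-contained userspace sketch of the same TLV-record walk used
 * by lpfc_get_rec_conf23() above. The record types and the sample buffer are
 * hypothetical and exist only to illustrate the traversal; this is not part
 * of the driver.
 */
#include <stdint.h>
#include <stdio.h>

#define REC_LAST	0xff	/* hypothetical end-of-region marker */
#define REC_FCOE	0xa0	/* hypothetical "FCoE parameters" record type */

/* Walk TLV records: byte 0 = type, byte 1 = length in 32-bit data words. */
static const uint8_t *find_rec(const uint8_t *buf, uint32_t size, uint8_t type)
{
	uint32_t off = 0, words;

	if (size < sizeof(uint32_t) || buf[0] == REC_LAST)
		return NULL;

	words = buf[1];
	while (off + words * sizeof(uint32_t) + sizeof(uint32_t) <= size) {
		if (buf[off] == type)
			return &buf[off];
		if (buf[off] == REC_LAST)
			return NULL;
		off += words * sizeof(uint32_t) + sizeof(uint32_t);
		if (off + 1 >= size)	/* guard the next header read */
			return NULL;
		words = buf[off + 1];
	}
	return NULL;
}

int main(void)
{
	/* One record of type REC_FCOE with one data word, then the end marker. */
	const uint8_t region[] = {
		REC_FCOE, 1, 0, 0,  0xde, 0xad, 0xbe, 0xef,
		REC_LAST, 0, 0, 0,
	};

	printf("record %sfound\n",
	       find_rec(region, sizeof(region), REC_FCOE) ? "" : "not ");
	return 0;
}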
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048)  * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049)  * @phba: Pointer to lpfc_hba data structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050)  * @buff: Buffer containing config region 23 data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051)  * @size: Size of the data buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053)  * This function parses the FCoE config parameters in config region 23 and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054)  * populates the driver data structures with the parameters.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) 		uint8_t *buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) 		uint32_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) 	uint32_t offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) 	uint8_t *rec_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) 	 * If data size is less than 2 words signature and version cannot be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) 	 * verified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) 	if (size < 2*sizeof(uint32_t))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) 	/* Check the region signature first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) 	if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) 			"2567 Config region 23 has bad signature\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) 	offset += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080) 	/* Check the data structure version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) 	if (buff[offset] != LPFC_REGION23_VERSION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) 				"2568 Config region 23 has bad version\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) 	offset += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) 	/* Read FCoE param record */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) 	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090) 			size - offset, FCOE_PARAM_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) 	if (rec_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) 		lpfc_read_fcoe_param(phba, rec_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) 	/* Read FCF connection table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) 	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096) 		size - offset, FCOE_CONN_TBL_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) 	if (rec_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) 		lpfc_read_fcf_conn_tbl(phba, rec_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) }