Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

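The file below is be_cmds.c from the Emulex/Broadcom be2iscsi SCSI driver (drivers/scsi/be2iscsi/ in the kernel tree); in this tree, git blame attributes every line of it to commit 8f3ce5b39 (kx, 2023-10-28).
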
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Broadcom. All Rights Reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 */

#include <scsi/iscsi_proto.h>

#include "be_main.h"
#include "be.h"
#include "be_mgmt.h"

/* UE Status Low CSR */
static const char * const desc_ue_status_low[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const desc_ue_status_hi[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

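/*
 * alloc_mcc_wrb()- Allocate a WRB and tag for an MCC command
 * @phba: Driver private structure
 * @ref_tag: Filled with the tag allocated for this command
 *
 * Reserves a free tag and the next WRB slot in the MCC queue under
 * mcc_lock.
 *
 * return
 * Success: pointer to the zeroed WRB
 * Failure: NULL when the queue is full or no tag is available
 **/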
struct be_mcc_wrb *alloc_mcc_wrb(struct beiscsi_hba *phba,
				 unsigned int *ref_tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb = NULL;
	unsigned int tag;

	spin_lock(&phba->ctrl.mcc_lock);
	if (mccq->used == mccq->len) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC queue full: WRB used %u tag avail %u\n",
			    mccq->used, phba->ctrl.mcc_tag_available);
		goto alloc_failed;
	}

	if (!phba->ctrl.mcc_tag_available)
		goto alloc_failed;

	tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
	if (!tag) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			    "BC_%d : MCC tag 0 allocated: tag avail %u alloc index %u\n",
			    phba->ctrl.mcc_tag_available,
			    phba->ctrl.mcc_alloc_index);
		goto alloc_failed;
	}

	/* return this tag for further reference */
	*ref_tag = tag;
	phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
	phba->ctrl.mcc_tag_status[tag] = 0;
	phba->ctrl.ptag_state[tag].tag_state = 0;
	phba->ctrl.ptag_state[tag].cbfn = NULL;
	phba->ctrl.mcc_tag_available--;
	if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
		phba->ctrl.mcc_alloc_index = 0;
	else
		phba->ctrl.mcc_alloc_index++;

	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = tag;
	wrb->tag0 |= (mccq->head << MCC_Q_WRB_IDX_SHIFT) & MCC_Q_WRB_IDX_MASK;
	queue_head_inc(mccq);
	mccq->used++;

alloc_failed:
	spin_unlock(&phba->ctrl.mcc_lock);
	return wrb;
}

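/*
 * free_mcc_wrb()- Release the tag and WRB slot of a completed MCC command
 * @ctrl: Function specific MBX data structure
 * @tag: Tag of the command being released
 *
 * Returns the tag to the free pool and updates the MCC queue accounting
 * under mcc_lock.
 **/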
void free_mcc_wrb(struct be_ctrl_info *ctrl, unsigned int tag)
{
	struct be_queue_info *mccq = &ctrl->mcc_obj.q;

	spin_lock(&ctrl->mcc_lock);
	tag = tag & MCC_Q_CMD_TAG_MASK;
	ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
	if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
		ctrl->mcc_free_index = 0;
	else
		ctrl->mcc_free_index++;
	ctrl->mcc_tag_available++;
	mccq->used--;
	spin_unlock(&ctrl->mcc_lock);
}

/*
 * __beiscsi_mcc_compl_status - Return the status of MCC completion
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 */
int __beiscsi_mcc_compl_status(struct beiscsi_hba *phba,
			       unsigned int tag,
			       struct be_mcc_wrb **wrb,
			       struct be_dma_mem *mbx_cmd_mem)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	uint16_t status = 0, addl_status = 0, wrb_num = 0;
	struct be_cmd_resp_hdr *mbx_resp_hdr;
	struct be_cmd_req_hdr *mbx_hdr;
	struct be_mcc_wrb *temp_wrb;
	uint32_t mcc_tag_status;
	int rc = 0;

	mcc_tag_status = phba->ctrl.mcc_tag_status[tag];
	status = (mcc_tag_status & CQE_STATUS_MASK);
	addl_status = ((mcc_tag_status & CQE_STATUS_ADDL_MASK) >>
			CQE_STATUS_ADDL_SHIFT);

	if (mbx_cmd_mem) {
		mbx_hdr = (struct be_cmd_req_hdr *)mbx_cmd_mem->va;
	} else {
		wrb_num = (mcc_tag_status & CQE_STATUS_WRB_MASK) >>
			  CQE_STATUS_WRB_SHIFT;
		temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
		mbx_hdr = embedded_payload(temp_wrb);

		if (wrb)
			*wrb = temp_wrb;
	}

	if (status || addl_status) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Failed for Subsys : %d Opcode : %d with Status : %d and Extd_Status : %d\n",
			    mbx_hdr->subsystem, mbx_hdr->opcode,
			    status, addl_status);
		rc = -EIO;
		if (status == MCC_STATUS_INSUFFICIENT_BUFFER) {
			mbx_resp_hdr = (struct be_cmd_resp_hdr *)mbx_hdr;
			beiscsi_log(phba, KERN_WARNING,
				    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
				    BEISCSI_LOG_CONFIG,
				    "BC_%d : Insufficient Buffer Error Resp_Len : %d Actual_Resp_Len : %d\n",
				    mbx_resp_hdr->response_length,
				    mbx_resp_hdr->actual_resp_len);
			rc = -EAGAIN;
		}
	}

	return rc;
}

/*
 * beiscsi_mccq_compl_wait()- Process completion in MCC CQ
 * @phba: Driver private structure
 * @tag: Tag for the MBX Command
 * @wrb: the WRB used for the MBX Command
 * @mbx_cmd_mem: ptr to memory allocated for MBX Cmd
 *
 * Waits for MBX completion with the passed TAG.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
int beiscsi_mccq_compl_wait(struct beiscsi_hba *phba,
			    unsigned int tag,
			    struct be_mcc_wrb **wrb,
			    struct be_dma_mem *mbx_cmd_mem)
{
	int rc = 0;

	if (!tag || tag > MAX_MCC_CMD) {
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : invalid tag %u\n", tag);
		return -EINVAL;
	}

	if (beiscsi_hba_in_error(phba)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/* wait for the mccq completion */
	rc = wait_event_interruptible_timeout(phba->ctrl.mcc_wait[tag],
					      phba->ctrl.mcc_tag_status[tag],
					      msecs_to_jiffies(
						BEISCSI_HOST_MBX_TIMEOUT));
	/**
	 * Return EIO if port is being disabled. Associated DMA memory, if any,
	 * is freed by the caller. When the port goes offline, the MCCQ is
	 * cleaned up and so is the WRB.
	 */
	if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
		clear_bit(MCC_TAG_STATE_RUNNING,
			  &phba->ctrl.ptag_state[tag].tag_state);
		return -EIO;
	}

	/**
	 * If the MBOX cmd timeout expired, the tag and resources allocated
	 * for the cmd are not freed until FW returns the completion.
	 */
	if (rc <= 0) {
		struct be_dma_mem *tag_mem;

		/**
		 * PCI/DMA memory allocated and posted in non-embedded mode
		 * will have mbx_cmd_mem != NULL.
		 * Save virtual and bus addresses for the command so that it
		 * can be freed later.
		 **/
		tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
		if (mbx_cmd_mem) {
			tag_mem->size = mbx_cmd_mem->size;
			tag_mem->va = mbx_cmd_mem->va;
			tag_mem->dma = mbx_cmd_mem->dma;
		} else
			tag_mem->size = 0;

		/* first make tag_mem_state visible to all */
		wmb();
		set_bit(MCC_TAG_STATE_TIMEOUT,
				&phba->ctrl.ptag_state[tag].tag_state);

		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Cmd Completion timed out\n");
		return -EBUSY;
	}

	rc = __beiscsi_mcc_compl_status(phba, tag, wrb, mbx_cmd_mem);

	free_mcc_wrb(&phba->ctrl, tag);
	return rc;
}

/*
 * beiscsi_process_mbox_compl()- Check the MBX completion status
 * @ctrl: Function specific MBX data structure
 * @compl: Completion status of MBX Command
 *
 * Check for the MBX completion status when BMBX method used
 *
 * return
 * Success: Zero
 * Failure: Non-Zero
 **/
static int beiscsi_process_mbox_compl(struct be_ctrl_info *ctrl,
				      struct be_mcc_compl *compl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
	u16 compl_status, extd_status;

	/**
	 * To check if valid bit is set, check the entire word as we don't know
	 * the endianness of the data (old entry is host endian while a new
	 * entry is little endian)
	 */
	if (!compl->flags) {
		beiscsi_log(phba, KERN_ERR,
				BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
				"BC_%d : BMBX busy, no completion\n");
		return -EBUSY;
	}
	compl->flags = le32_to_cpu(compl->flags);
	WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);

	/**
	 * Just swap the status to host endian;
	 * mcc tag is opaquely copied from mcc_wrb.
	 */
	be_dws_le_to_cpu(compl, 4);
	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		CQE_STATUS_EXTD_MASK;
	/* Need to reset the entire word that houses the valid bit */
	compl->flags = 0;

	if (compl_status == MCC_STATUS_SUCCESS)
		return 0;

	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : error in cmd completion: Subsystem : %d Opcode : %d status(compl/extd)=%d/%d\n",
		    hdr->subsystem, hdr->opcode, compl_status, extd_status);
	return compl_status;
}

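/*
 * beiscsi_process_async_link()- Handle a link state ASYNC event
 * @phba: Driver private structure
 * @compl: ASYNC event completion
 *
 * Records the reported port speed and updates BEISCSI_HBA_LINK_UP. On
 * link up, boot work is scheduled if a boot target was found; on link
 * down, every session on the host is failed.
 **/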
static void beiscsi_process_async_link(struct beiscsi_hba *phba,
				       struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt;

	evt = (struct be_async_event_link_state *)compl;

	phba->port_speed = evt->port_speed;
	/**
	 * Check logical link status in ASYNC event.
	 * This has been newly introduced in SKH-R Firmware 10.0.338.45.
	 **/
	if (evt->port_link_status & BE_ASYNC_LINK_UP_MASK) {
		set_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Up on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
	} else {
		clear_bit(BEISCSI_HBA_LINK_UP, &phba->state);
		__beiscsi_log(phba, KERN_ERR,
			      "BC_%d : Link Down on Port %d tag 0x%x\n",
			      evt->physical_port, evt->event_tag);
		iscsi_host_for_each_session(phba->shost,
					    beiscsi_session_fail);
	}
}

static char *beiscsi_port_misconf_event_msg[] = {
	"Physical Link is functional.",
	"Optics faulted/incorrectly installed/not installed - Reseat optics, if issue not resolved, replace.",
	"Optics of two types installed - Remove one optic or install matching pair of optics.",
	"Incompatible optics - Replace with compatible optics for card to function.",
	"Unqualified optics - Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics - Replace with Avago Certified optics to enable link operation."
};

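/*
 * beiscsi_process_async_sli()- Handle an SLI ASYNC event
 * @phba: Driver private structure
 * @compl: ASYNC event completion
 *
 * Only the MISCONFIGURED physical port event is handled: the optic state
 * reported for this port is saved and, when it changes, logged along
 * with the link effect reported by FW.
 **/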
static void beiscsi_process_async_sli(struct beiscsi_hba *phba,
				      struct be_mcc_compl *compl)
{
	struct be_async_event_sli *async_sli;
	u8 evt_type, state, old_state, le;
	char *sev = KERN_WARNING;
	char *msg = NULL;

	evt_type = compl->flags >> ASYNC_TRAILER_EVENT_TYPE_SHIFT;
	evt_type &= ASYNC_TRAILER_EVENT_TYPE_MASK;

	/* processing only MISCONFIGURED physical port event */
	if (evt_type != ASYNC_SLI_EVENT_TYPE_MISCONFIGURED)
		return;

	async_sli = (struct be_async_event_sli *)compl;
	state = async_sli->event_data1 >>
		 (phba->fw_config.phys_port * 8) & 0xff;
	le = async_sli->event_data2 >>
		 (phba->fw_config.phys_port * 8) & 0xff;

	old_state = phba->optic_state;
	phba->optic_state = state;

	if (state >= ARRAY_SIZE(beiscsi_port_misconf_event_msg)) {
		/* fw is reporting a state we don't know, log and return */
		__beiscsi_log(phba, KERN_ERR,
			    "BC_%d : Port %c: Unrecognized optic state 0x%x\n",
			    phba->port_name, async_sli->event_data1);
		return;
	}

	if (ASYNC_SLI_LINK_EFFECT_VALID(le)) {
		/* log link effect for unqualified-4, uncertified-5 optics */
		if (state > 3)
			msg = (ASYNC_SLI_LINK_EFFECT_STATE(le)) ?
				" Link is non-operational." :
				" Link is operational.";
		/* 1 - info */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 1)
			sev = KERN_INFO;
		/* 2 - error */
		if (ASYNC_SLI_LINK_EFFECT_SEV(le) == 2)
			sev = KERN_ERR;
	}

	if (old_state != phba->optic_state)
		__beiscsi_log(phba, sev, "BC_%d : Port %c: %s%s\n",
			      phba->port_name,
			      beiscsi_port_misconf_event_msg[state],
			      !msg ? "" : msg);
}

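/*
 * beiscsi_process_async_event()- Dispatch an ASYNC completion
 * @phba: Driver private structure
 * @compl: ASYNC event completion
 *
 * Decodes the event code from the ASYNC trailer, hands off link state
 * and SLI events, and kicks off boot work for iSCSI events when a boot
 * target was found; the event is always logged.
 **/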
void beiscsi_process_async_event(struct beiscsi_hba *phba,
				struct be_mcc_compl *compl)
{
	char *sev = KERN_INFO;
	u8 evt_code;

	/* interpret flags as an async trailer */
	evt_code = compl->flags >> ASYNC_TRAILER_EVENT_CODE_SHIFT;
	evt_code &= ASYNC_TRAILER_EVENT_CODE_MASK;
	switch (evt_code) {
	case ASYNC_EVENT_CODE_LINK_STATE:
		beiscsi_process_async_link(phba, compl);
		break;
	case ASYNC_EVENT_CODE_ISCSI:
		if (test_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state))
			beiscsi_start_boot_work(phba, BE_BOOT_INVALID_SHANDLE);
		sev = KERN_ERR;
		break;
	case ASYNC_EVENT_CODE_SLI:
		beiscsi_process_async_sli(phba, compl);
		break;
	default:
		/* event not registered */
		sev = KERN_ERR;
	}

	beiscsi_log(phba, sev, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
		    "BC_%d : ASYNC Event %x: status 0x%08x flags 0x%08x\n",
		    evt_code, compl->status, compl->flags);
}

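/*
 * beiscsi_process_mcc_compl()- Process a completion from the MCC CQ
 * @ctrl: Function specific MBX data structure
 * @compl: Completion for an MCC command
 *
 * Matches the completion to its tag, frees resources for commands that
 * already timed out, records the status in mcc_tag_status and then
 * either runs the ASYNC callback, just checks the status for tags in
 * IGNORE state, or wakes the waiter in beiscsi_mccq_compl_wait().
 *
 * return
 * Always 0
 **/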
int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
			      struct be_mcc_compl *compl)
{
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	u16 compl_status, extd_status;
	struct be_dma_mem *tag_mem;
	unsigned int tag, wrb_idx;

	be_dws_le_to_cpu(compl, 4);
	tag = (compl->tag0 & MCC_Q_CMD_TAG_MASK);
	wrb_idx = (compl->tag0 & CQE_STATUS_WRB_MASK) >> CQE_STATUS_WRB_SHIFT;

	if (!test_bit(MCC_TAG_STATE_RUNNING,
		      &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX |
			    BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX cmd completed but not posted\n");
		return 0;
	}

	/* end MCC with this tag */
	clear_bit(MCC_TAG_STATE_RUNNING, &ctrl->ptag_state[tag].tag_state);

	if (test_bit(MCC_TAG_STATE_TIMEOUT, &ctrl->ptag_state[tag].tag_state)) {
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_MBOX | BEISCSI_LOG_INIT |
			    BEISCSI_LOG_CONFIG,
			    "BC_%d : MBX Completion for timeout Command from FW\n");
		/**
		 * Check for the size before freeing resource.
		 * Only for non-embedded cmd, PCI resource is allocated.
		 **/
		tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
		if (tag_mem->size) {
			dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
					tag_mem->va, tag_mem->dma);
			tag_mem->size = 0;
		}
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
		       CQE_STATUS_COMPL_MASK;
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
		      CQE_STATUS_EXTD_MASK;
	/* The ctrl.mcc_tag_status[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	ctrl->mcc_tag_status[tag] = CQE_VALID_MASK;
	ctrl->mcc_tag_status[tag] |= (wrb_idx << CQE_STATUS_WRB_SHIFT);
	ctrl->mcc_tag_status[tag] |= (extd_status << CQE_STATUS_ADDL_SHIFT) &
				     CQE_STATUS_ADDL_MASK;
	ctrl->mcc_tag_status[tag] |= (compl_status & CQE_STATUS_MASK);

	if (test_bit(MCC_TAG_STATE_ASYNC, &ctrl->ptag_state[tag].tag_state)) {
		if (ctrl->ptag_state[tag].cbfn)
			ctrl->ptag_state[tag].cbfn(phba, tag);
		else
			__beiscsi_log(phba, KERN_ERR,
				      "BC_%d : MBX ASYNC command with no callback\n");
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	if (test_bit(MCC_TAG_STATE_IGNORE, &ctrl->ptag_state[tag].tag_state)) {
		/* just check completion status and free wrb */
		__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
		free_mcc_wrb(ctrl, tag);
		return 0;
	}

	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}

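/*
 * be_mcc_notify()- Ring the MCC doorbell to post a WRB
 * @phba: Driver private structure
 * @tag: Tag of the command being posted
 *
 * Marks the tag as running and writes the MCC queue doorbell so the
 * adapter processes the newly posted WRB.
 **/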
void be_mcc_notify(struct beiscsi_hba *phba, unsigned int tag)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	u32 val = 0;

	set_bit(MCC_TAG_STATE_RUNNING, &phba->ctrl.ptag_state[tag].tag_state);
	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
	/* make request available for DMA */
	wmb();
	iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
}

/*
 * be_mbox_db_ready_poll()- Check ready status
 * @ctrl: Function specific MBX data structure
 *
 * Check for the ready status of FW to send BMBX
 * commands to adapter.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_db_ready_poll(struct be_ctrl_info *ctrl)
{
	/* wait 30s for generic non-flash MBOX operation */
#define BEISCSI_MBX_RDY_BIT_TIMEOUT	30000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
	unsigned long timeout;
	u32 ready;

	/*
	 * This BMBX busy wait path is used during init only.
	 * For the commands executed during init, 5s should suffice.
	 */
	timeout = jiffies + msecs_to_jiffies(BEISCSI_MBX_RDY_BIT_TIMEOUT);
	do {
		if (beiscsi_hba_in_error(phba))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -EIO;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			return 0;

		if (time_after(jiffies, timeout))
			break;
		/* 1ms sleep is enough in most cases */
		schedule_timeout_uninterruptible(msecs_to_jiffies(1));
	} while (!ready);

	beiscsi_log(phba, KERN_ERR,
			BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
			"BC_%d : FW Timed Out\n");
	set_bit(BEISCSI_HBA_FW_TIMEOUT, &phba->state);
	return -EBUSY;
}

/*
 * be_mbox_notify: Notify adapter of new BMBX command
 * @ctrl: Function specific MBX data structure
 *
 * Ring doorbell to inform adapter of a BMBX command
 * to process
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/
static int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val |= MPU_MAILBOX_DB_HI_MASK;
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_poll(ctrl);
	if (status)
		return status;

	/* RDY is set; small delay before CQE read. */
	udelay(1);

	status = beiscsi_process_mbox_compl(ctrl, &mbox->compl);
	return status;
}

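/*
 * be_wrb_hdr_prepare()- Fill the MCC WRB header
 * @wrb: WRB being prepared
 * @payload_len: Length of the command payload
 * @embedded: True if the command is embedded in the WRB
 * @sge_cnt: Number of SGEs for a non-embedded command
 *
 * Sets the embedded flag or SGE count and the payload length, then
 * converts the header to little endian.
 **/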
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, u32 payload_len,
			bool embedded, u8 sge_cnt)
{
	if (embedded)
		wrb->emb_sgecnt_special |= MCC_WRB_EMBEDDED_MASK;
	else
		wrb->emb_sgecnt_special |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
					   MCC_WRB_SGE_CNT_SHIFT;
	wrb->payload_length = payload_len;
	be_dws_cpu_to_le(wrb, 8);
}

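/*
 * be_cmd_hdr_prepare()- Fill the common command request header
 * @req_hdr: Request header to fill
 * @subsystem: Subsystem of the command
 * @opcode: Opcode of the command
 * @cmd_len: Total command length including this header
 *
 * The request length excludes the header itself; the FW timeout is set
 * to the default BEISCSI_FW_MBX_TIMEOUT.
 **/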
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
			u8 subsystem, u8 opcode, u32 cmd_len)
{
	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}

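/*
 * be_cmd_page_addrs_prepare()- Fill page addresses for a queue command
 * @pages: Array of page address entries in the command
 * @max_pages: Capacity of the @pages array
 * @mem: DMA memory backing the queue
 *
 * Splits the DMA region into 4K pages and records each page address in
 * little endian, up to @max_pages entries.
 **/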
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
							struct be_dma_mem *mem)
{
	int i, buf_pages;
	u64 dma = (u64) mem->dma;

	buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

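/*
 * eq_delay_to_mult()- Convert an EQ delay to the delay multiplier
 * @usec_delay: Desired interrupt delay in microseconds
 *
 * Converts the delay into the multiplier value (0-1023) programmed into
 * the EQ context to moderate the interrupt rate.
 **/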
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE 651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			multiplier = (multiplier + round / 2) / round;
			multiplier = min(multiplier, (u32) 1023);
		}
	}
	return multiplier;
}

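/* Return the WRB embedded in the bootstrap mailbox memory. */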
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}

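/*
 * beiscsi_cmd_eq_create()- Create an event queue via the bootstrap mailbox
 * @ctrl: Function specific MBX data structure
 * @eq: Event queue to create
 * @eq_delay: Interrupt delay in microseconds for this EQ
 *
 * Issues OPCODE_COMMON_EQ_CREATE under mbox_lock; on success the EQ id
 * returned by FW is stored and the queue is marked created.
 *
 * return
 * Success: 0
 * Failure: Non-Zero
 **/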
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			  struct be_queue_info *eq, int eq_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	struct be_dma_mem *q_mem = &eq->dma_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 			OPCODE_COMMON_EQ_CREATE, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 						PCI_FUNC(ctrl->pdev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 					__ilog2_u32(eq->len / 256));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 					eq_delay_to_mult(eq_delay));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	be_dws_cpu_to_le(req->context, sizeof(req->context));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 		eq->id = le16_to_cpu(resp->eq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		eq->created = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
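/**
 * beiscsi_cmd_cq_create()- Create a Completion Queue bound to an EQ
 * @ctrl: ptr to ctrl_info
 * @cq: CQ to be created
 * @eq: EQ the CQ is attached to
 * @sol_evts: generate solicited events
 * @no_delay: disable interrupt coalescing delay
 * @coalesce_wm: coalescing watermark
 *
 * Uses the v0 CQ context on BE2/BE3-R chips and the v2 context on later
 * chips. On success the CQ id returned by the FW is stored in @cq.
 **/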
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 			  struct be_queue_info *cq, struct be_queue_info *eq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 			  bool sol_evts, bool no_delay, int coalesce_wm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	struct be_cmd_req_cq_create *req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	struct be_dma_mem *q_mem = &cq->dma_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	void *ctxt = &req->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 			OPCODE_COMMON_CQ_CREATE, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	if (is_chip_be2_be3r(phba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		AMAP_SET_BITS(struct amap_cq_context, coalescwm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			      ctxt, coalesce_wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			      __ilog2_u32(cq->len / 256));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 			      PCI_FUNC(ctrl->pdev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		req->hdr.version = MBX_CMD_VER2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		req->page_size = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			      ctxt, coalesce_wm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			      ctxt, no_delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 			      __ilog2_u32(cq->len / 256));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		cq->id = le16_to_cpu(resp->cq_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		cq->created = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			    "BC_%d : In be_cmd_cq_create, status=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			    status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 
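/*
 * Encode a queue length for the FW ring_size fields: fls() gives
 * log2(len) + 1 for power-of-two lengths, and an encoded value of 16
 * is mapped to 0 (presumably the largest ring size the FW supports).
 */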
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) static u32 be_encoded_q_len(int q_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	u32 len_encoded = fls(q_len);	/* log2(len) + 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (len_encoded == 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		len_encoded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	return len_encoded;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
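/**
 * beiscsi_cmd_mccq_create()- Create the MCC queue
 * @phba: device priv structure instance
 * @mccq: MCC queue to be created
 * @cq: CQ on which MCC completions are posted
 *
 * Issues OPCODE_COMMON_MCC_CREATE_EXT and subscribes to link state,
 * iSCSI and SLI async events.
 **/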
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			struct be_queue_info *mccq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			struct be_queue_info *cq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	struct be_mcc_wrb *wrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	struct be_cmd_req_mcc_create_ext *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	struct be_dma_mem *q_mem = &mccq->dma_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	struct be_ctrl_info *ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	void *ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	mutex_lock(&phba->ctrl.mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	ctrl = &phba->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	ctxt = &req->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	req->async_evt_bitmap = 1 << ASYNC_EVENT_CODE_LINK_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	req->async_evt_bitmap |= 1 << ASYNC_EVENT_CODE_SLI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		      PCI_FUNC(phba->pcidev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		be_encoded_q_len(mccq->len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		mccq->id = le16_to_cpu(resp->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		mccq->created = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	mutex_unlock(&phba->ctrl.mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
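/**
 * beiscsi_cmd_q_destroy()- Destroy a previously created queue
 * @ctrl: ptr to ctrl_info
 * @q: queue to be destroyed (NULL for QTYPE_SGL)
 * @queue_type: QTYPE_EQ/CQ/MCCQ/WRBQ/DPDUQ/SGL
 *
 * Selects the subsystem and opcode for the queue type and issues the
 * destroy command over the mailbox; BUG()s on an unknown queue type.
 **/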
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			  int queue_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	u8 subsys = 0, opcode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		    "BC_%d : In beiscsi_cmd_q_destroy "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		    "queue_type : %d\n", queue_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	switch (queue_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	case QTYPE_EQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		subsys = CMD_SUBSYSTEM_COMMON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		opcode = OPCODE_COMMON_EQ_DESTROY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	case QTYPE_CQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		subsys = CMD_SUBSYSTEM_COMMON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		opcode = OPCODE_COMMON_CQ_DESTROY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	case QTYPE_MCCQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		subsys = CMD_SUBSYSTEM_COMMON;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		opcode = OPCODE_COMMON_MCC_DESTROY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	case QTYPE_WRBQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		subsys = CMD_SUBSYSTEM_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	case QTYPE_DPDUQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		subsys = CMD_SUBSYSTEM_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	case QTYPE_SGL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		subsys = CMD_SUBSYSTEM_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (queue_type != QTYPE_SGL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		req->id = cpu_to_le16(q->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957)  * be_cmd_create_default_pdu_queue()- Create DEFQ for the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958)  * @ctrl: ptr to ctrl_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959)  * @cq: Completion Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960)  * @dq: Default Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961)  * @length: ring size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962)  * @entry_size: size of each entry in DEFQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  * @is_header: Header or Data DEFQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964)  * @ulp_num: Bind to which ULP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  * Create HDR/Data DEFQ for the passed ULP. Unsolicited PDUs are posted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967)  * on this queue by the FW.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969)  * return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970)  *	Success: 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971)  *	Failure: Non-Zero Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 				    struct be_queue_info *cq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 				    struct be_queue_info *dq, int length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 				    int entry_size, uint8_t is_header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 				    uint8_t ulp_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	struct be_defq_create_req *req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct be_dma_mem *q_mem = &dq->dma_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	void *ctxt = &req->context;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	if (phba->fw_config.dual_ulp_aware) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		req->ulp_num = ulp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	if (is_chip_be2_be3r(phba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		AMAP_SET_BITS(struct amap_be_default_pdu_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 			      rx_pdid, ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		AMAP_SET_BITS(struct amap_be_default_pdu_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 			      rx_pdid_valid, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		AMAP_SET_BITS(struct amap_be_default_pdu_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			      pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		AMAP_SET_BITS(struct amap_be_default_pdu_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			      ring_size, ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			      be_encoded_q_len(length /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			      sizeof(struct phys_addr)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		AMAP_SET_BITS(struct amap_be_default_pdu_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 			      default_buffer_size, ctxt, entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		AMAP_SET_BITS(struct amap_be_default_pdu_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			      cq_id_recv, ctxt,	cq->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			      rx_pdid, ctxt, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			      rx_pdid_valid, ctxt, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			      ring_size, ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			      be_encoded_q_len(length /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			      sizeof(struct phys_addr)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 			      default_buffer_size, ctxt, entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		AMAP_SET_BITS(struct amap_default_pdu_context_ext,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			      cq_id_recv, ctxt, cq->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		struct be_ring *defq_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		struct be_defq_create_resp *resp = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		dq->id = le16_to_cpu(resp->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		dq->created = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (is_header)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			defq_ring = &phba->phwi_ctrlr->default_pdu_hdr[ulp_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			defq_ring =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 				&phba->phwi_ctrlr->default_pdu_data[ulp_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		defq_ring->id = dq->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		if (!phba->fw_config.dual_ulp_aware) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			defq_ring->ulp_num = BEISCSI_ULP0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			defq_ring->doorbell_offset = DB_RXULP0_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			defq_ring->ulp_num = resp->ulp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			defq_ring->doorbell_offset = resp->doorbell_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)  * be_cmd_wrbq_create()- Create WRBQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)  * @ctrl: ptr to ctrl_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)  * @q_mem: memory details for the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)  * @wrbq: queue info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069)  * @pwrb_context: ptr to wrb_context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)  * @ulp_num: ULP on which the WRBQ is to be created
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  * Create WRBQ on the passed ULP_NUM.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) int be_cmd_wrbq_create(struct be_ctrl_info *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			struct be_dma_mem *q_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			struct be_queue_info *wrbq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			struct hwi_wrb_context *pwrb_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			uint8_t ulp_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	struct be_wrbq_create_req *req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	struct be_wrbq_create_resp *resp = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	if (phba->fw_config.dual_ulp_aware) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		req->ulp_num = ulp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		wrbq->id = le16_to_cpu(resp->cid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		wrbq->created = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		pwrb_context->cid = wrbq->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		if (!phba->fw_config.dual_ulp_aware) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			pwrb_context->doorbell_offset = DB_TXULP0_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			pwrb_context->ulp_num = BEISCSI_ULP0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 			pwrb_context->ulp_num = resp->ulp_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			pwrb_context->doorbell_offset = resp->doorbell_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
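/**
 * be_cmd_iscsi_post_template_hdr()- Post iSCSI template header pages
 * @ctrl: ptr to ctrl_info
 * @q_mem: DMA memory describing the template header buffer
 *
 * Posts the pages to the FW using
 * OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS.
 **/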
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) int be_cmd_iscsi_post_template_hdr(struct be_ctrl_info *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 				    struct be_dma_mem *q_mem)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	struct be_post_template_pages_req *req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			   OPCODE_COMMON_ADD_TEMPLATE_HEADER_BUFFERS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			   sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
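/**
 * be_cmd_iscsi_remove_template_hdr()- Remove iSCSI template header pages
 * @ctrl: ptr to ctrl_info
 *
 * Reverses be_cmd_iscsi_post_template_hdr() using
 * OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS.
 **/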
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) int be_cmd_iscsi_remove_template_hdr(struct be_ctrl_info *ctrl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	struct be_remove_template_pages_req *req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			   OPCODE_COMMON_REMOVE_TEMPLATE_HEADER_BUFFERS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			   sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
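/**
 * be_cmd_iscsi_post_sgl_pages()- Post SGL pages for iSCSI ICDs
 * @ctrl: ptr to ctrl_info
 * @q_mem: DMA memory for the pages being posted
 * @page_offset: starting page offset in the FW SGL table
 * @num_pages: number of pages to post (0xff is passed through to the
 *	       FW as a special value)
 *
 * Posts the pages in chunks bounded by the request's page array. If any
 * chunk fails, the already posted SGL pages are removed again.
 **/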
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				struct be_dma_mem *q_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 				u32 page_offset, u32 num_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	unsigned int curr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	u32 internal_page_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	u32 temp_num_pages = num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	if (num_pages == 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		num_pages = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 				   sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 						pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		req->num_pages = min(num_pages, curr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		req->page_offset = page_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		internal_page_offset += req->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		page_offset += req->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		num_pages -= req->num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 		if (temp_num_pages == 0xff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			req->num_pages = temp_num_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 				    "BC_%d : FW CMD to map iscsi frags failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	} while (num_pages > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (status != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)  * be_cmd_set_vlan()- Configure VLAN parameters on the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219)  * @phba: device priv structure instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220)  * @vlan_tag: TAG to be set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222)  * Set the VLAN_TAG for the adapter or Disable VLAN on adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224)  * returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)  *	TAG for the MBX Cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) int be_cmd_set_vlan(struct beiscsi_hba *phba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		     uint16_t vlan_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	unsigned int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	struct be_mcc_wrb *wrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	struct be_cmd_set_vlan_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	struct be_ctrl_info *ctrl = &phba->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	if (mutex_lock_interruptible(&ctrl->mbox_lock))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	wrb = alloc_mcc_wrb(phba, &tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	if (!wrb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	be_wrb_hdr_prepare(wrb, sizeof(*wrb), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			   OPCODE_COMMON_ISCSI_NTWK_SET_VLAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			   sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	req->interface_hndl = phba->interface_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	req->vlan_priority = vlan_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	be_mcc_notify(phba, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	return tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
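/**
 * beiscsi_check_supported_fw()- Query controller attributes from the FW
 * @ctrl: ptr to ctrl_info
 * @phba: device priv structure instance
 *
 * Issues OPCODE_COMMON_GET_CNTL_ATTRIBUTES using a non-embedded buffer,
 * logs the flashrom and firmware version strings and caches the
 * firmware version and iscsi_features in @phba.
 **/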
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			       struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	struct be_dma_mem nonemb_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	struct be_mgmt_controller_attributes *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	struct be_sge *sge = nonembedded_sgl(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 				sizeof(struct be_mgmt_controller_attributes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 				&nonemb_cmd.dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	if (nonemb_cmd.va == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			    "BG_%d : dma_alloc_coherent failed in %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 			    __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	nonemb_cmd.size = sizeof(struct be_mgmt_controller_attributes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	req = nonemb_cmd.va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	memset(req, 0, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 			   OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	sge->len = cpu_to_le32(nonemb_cmd.size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	if (!status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		struct be_mgmt_controller_attributes_resp *resp = nonemb_cmd.va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			    "BG_%d : Firmware Version of CMD : %s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			    "Firmware Version is : %s\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			    "Developer Build, not performing version check...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			    resp->params.hba_attribs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			    .flashrom_version_string,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			    resp->params.hba_attribs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 			    firmware_version_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		phba->fw_config.iscsi_features =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 				resp->params.hba_attribs.iscsi_features;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 			    "BM_%d : phba->fw_config.iscsi_features = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			    phba->fw_config.iscsi_features);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		memcpy(phba->fw_ver_str, resp->params.hba_attribs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		       firmware_version_string, BEISCSI_VER_STRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 			    "BG_%d : Failed in beiscsi_check_supported_fw\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	if (nonemb_cmd.va)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 		dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 				    nonemb_cmd.va, nonemb_cmd.dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)  * beiscsi_get_fw_config()- Get the FW config for the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320)  * @ctrl: ptr to Ctrl Info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)  * @phba: ptr to the dev priv structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * Get the FW config and resources available for the function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * The resources are created based on the count received here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  * return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  *	Success: 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  *	Failure: Non-Zero Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) int beiscsi_get_fw_config(struct be_ctrl_info *ctrl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			  struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	struct be_fw_cfg *pfw_cfg = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	uint32_t cid_count, icd_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	int status = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	uint8_t ulp_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	be_wrb_hdr_prepare(wrb, sizeof(*pfw_cfg), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	be_cmd_hdr_prepare(&pfw_cfg->hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			   EMBED_MBX_MAX_PAYLOAD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if (be_mbox_notify(ctrl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			    "BG_%d : Failed in beiscsi_get_fw_config\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		goto fail_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	/* FW response formats depend on port id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	phba->fw_config.phys_port = pfw_cfg->phys_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	if (phba->fw_config.phys_port >= BEISCSI_PHYS_PORT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			    "BG_%d : invalid physical port id %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			    phba->fw_config.phys_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		goto fail_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	/* populate and check FW config against min and max values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	if (!is_chip_be2_be3r(phba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		phba->fw_config.eqid_count = pfw_cfg->eqid_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		phba->fw_config.cqid_count = pfw_cfg->cqid_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		if (phba->fw_config.eqid_count == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		    phba->fw_config.eqid_count > 2048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 				    "BG_%d : invalid EQ count %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 				    phba->fw_config.eqid_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 			goto fail_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		if (phba->fw_config.cqid_count == 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		    phba->fw_config.cqid_count > 4096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 				    "BG_%d : invalid CQ count %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 				    phba->fw_config.cqid_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 			goto fail_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 			    "BG_%d : EQ_Count : %d CQ_Count : %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			    phba->fw_config.eqid_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 			    phba->fw_config.cqid_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	 * Check on which ULPs the iSCSI protocol is loaded and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	 * set the bit for each of them. This flag is used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	 * throughout the code to check on which ULP the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	 * iSCSI protocol is loaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	 **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		if (pfw_cfg->ulp[ulp_num].ulp_mode &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		    BEISCSI_ULP_ISCSI_INI_MODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			set_bit(ulp_num, &phba->fw_config.ulp_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			/* Get the CID, ICD and Chain count for each ULP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			phba->fw_config.iscsi_cid_start[ulp_num] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 				pfw_cfg->ulp[ulp_num].sq_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			phba->fw_config.iscsi_cid_count[ulp_num] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 				pfw_cfg->ulp[ulp_num].sq_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 			phba->fw_config.iscsi_icd_start[ulp_num] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 				pfw_cfg->ulp[ulp_num].icd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			phba->fw_config.iscsi_icd_count[ulp_num] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 				pfw_cfg->ulp[ulp_num].icd_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			phba->fw_config.iscsi_chain_start[ulp_num] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 				pfw_cfg->chain_icd[ulp_num].chain_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 			phba->fw_config.iscsi_chain_count[ulp_num] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 				pfw_cfg->chain_icd[ulp_num].chain_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 				    "BG_%d : Function loaded on ULP : %d\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 				    "\tiscsi_cid_count : %d\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 				    "\tiscsi_cid_start : %d\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 				    "\tiscsi_icd_count : %d\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 				    "\tiscsi_icd_start : %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 				    ulp_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 				    phba->fw_config.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 				    iscsi_cid_count[ulp_num],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 				    phba->fw_config.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 				    iscsi_cid_start[ulp_num],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 				    phba->fw_config.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 				    iscsi_icd_count[ulp_num],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 				    phba->fw_config.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 				    iscsi_icd_start[ulp_num]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	if (phba->fw_config.ulp_supported == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 			    "BG_%d : iSCSI initiator mode not set: ULP0 %x ULP1 %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			    pfw_cfg->ulp[BEISCSI_ULP0].ulp_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 			    pfw_cfg->ulp[BEISCSI_ULP1].ulp_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 		goto fail_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	 * ICD is shared among ULPs. Use icd_count of any one loaded ULP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	 **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	if (icd_count == 0 || icd_count > 65536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 			    "BG_%d: invalid ICD count %d\n", icd_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		goto fail_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		    BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (cid_count == 0 || cid_count > 4096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			    "BG_%d: invalid CID count %d\n", cid_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		goto fail_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	/**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	 * Check FW is dual ULP aware i.e. can handle either
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	 * of the protocols.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	phba->fw_config.dual_ulp_aware = (pfw_cfg->function_mode &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 					  BEISCSI_FUNC_DUA_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		    "BG_%d : DUA Mode : 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 		    phba->fw_config.dual_ulp_aware);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	/* all set, continue using this FW config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) fail_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479)  * beiscsi_get_port_name()- Get port name for the function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480)  * @ctrl: ptr to Ctrl Info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)  * @phba: ptr to the dev priv structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)  * Get the alphanumeric character identifying the port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) int beiscsi_get_port_name(struct be_ctrl_info *ctrl, struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	struct be_mcc_wrb *wrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	struct be_cmd_get_port_name *ioctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	ioctl = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			   OPCODE_COMMON_GET_PORT_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 			   EMBED_MBX_MAX_PAYLOAD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	ret = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	phba->port_name = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	if (!ret) {
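		/*
		 * port_names packs one name character per physical port,
		 * 8 bits each; extract the byte for this function's port.
		 */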
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		phba->port_name = ioctl->p.resp.port_names >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 				  (phba->fw_config.phys_port * 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 			    "BG_%d : GET_PORT_NAME ret 0x%x status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 			    ret, ioctl->h.resp_hdr.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (phba->port_name == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		phba->port_name = '?';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) int beiscsi_set_host_data(struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	struct be_ctrl_info *ctrl = &phba->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	struct be_cmd_set_host_data *ioctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	struct be_mcc_wrb *wrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
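	/* nothing to do for BE2/BE3-R adapters */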
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	if (is_chip_be2_be3r(phba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	ioctl = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 			   OPCODE_COMMON_SET_HOST_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 			   EMBED_MBX_MAX_PAYLOAD_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	ioctl->param.req.param_id = BE_CMD_SET_HOST_PARAM_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	ioctl->param.req.param_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		snprintf((char *)ioctl->param.req.param_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			 sizeof(ioctl->param.req.param_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 			 "Linux iSCSI v%s", BUILD_STR);
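	/* account for the NUL terminator and round up to a 4-byte multiple */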
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	ret = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 			    "BG_%d : HBA set host driver version\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		 * Older FW on SKH returns MCC_STATUS_ILLEGAL_REQUEST or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		 * MCC_STATUS_INVALID_LENGTH for this command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		    ret == MCC_STATUS_INVALID_LENGTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			__beiscsi_log(phba, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 				      "BG_%d : HBA failed to set host driver version\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) int beiscsi_set_uer_feature(struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	struct be_ctrl_info *ctrl = &phba->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	struct be_cmd_set_features *ioctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	struct be_mcc_wrb *wrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	ioctl = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	be_wrb_hdr_prepare(wrb, sizeof(*ioctl), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	be_cmd_hdr_prepare(&ioctl->h.req_hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 			   OPCODE_COMMON_SET_FEATURES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 			   EMBED_MBX_MAX_PAYLOAD_SIZE);
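	/* request UER (HBA error recovery) support from the FW */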
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	ioctl->feature = BE_CMD_SET_FEATURE_UER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	ioctl->param_len = sizeof(ioctl->param.req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	ioctl->param.req.uer = BE_CMD_UER_SUPP_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	ret = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		phba->ue2rp = ioctl->param.resp.ue2rp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		set_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 			    "BG_%d : HBA error recovery supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 		 * Older FW on SKH returns MCC_STATUS_ILLEGAL_REQUEST or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		 * MCC_STATUS_INVALID_LENGTH when UER is not supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		if (ret == MCC_STATUS_ILLEGAL_REQUEST ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		    ret == MCC_STATUS_INVALID_LENGTH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			__beiscsi_log(phba, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 				      "BG_%d : HBA error recovery not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) static u32 beiscsi_get_post_stage(struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	u32 sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
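	/*
	 * The POST stage lives in the SLIPORT semaphore register: in the
	 * CSR BAR on BE2/BE3-R, in PCI config space on other chips.
	 */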
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	if (is_chip_be2_be3r(phba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		sem = ioread32(phba->csr_va + SLIPORT_SEMAPHORE_OFFSET_BEx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		pci_read_config_dword(phba->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	return sem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) int beiscsi_check_fw_rdy(struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	u32 loop, post, rdy = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
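	/* poll the POST stage up to 1000 times, 60 ms apart (about 60 s) */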
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	loop = 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	while (loop--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		post = beiscsi_get_post_stage(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		if (post & POST_ERROR_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		if ((post & POST_STAGE_MASK) == POST_STAGE_ARMFW_RDY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 			rdy = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		msleep(60);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	if (!rdy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		__beiscsi_log(phba, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 			      "BC_%d : FW not ready 0x%x\n", post);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	return rdy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) int beiscsi_cmd_function_reset(struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	struct be_ctrl_info *ctrl = &phba->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	struct be_post_sgl_pages_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 			   OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) int beiscsi_cmd_special_wrb(struct be_ctrl_info *ctrl, u32 load)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	u8 *endian_check;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	memset(wrb, 0, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
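	/*
	 * Instead of a regular command header, this WRB carries a fixed
	 * byte signature (0x12345678 for driver load, 0xAABBCCDD for
	 * unload), framed by 0xFF markers.
	 */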
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	endian_check = (u8 *) wrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	if (load) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		/* to start communicating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		*endian_check++ = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		*endian_check++ = 0x12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		*endian_check++ = 0x34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		*endian_check++ = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		*endian_check++ = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		*endian_check++ = 0x56;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 		*endian_check++ = 0x78;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		*endian_check++ = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		/* to stop communicating */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		*endian_check++ = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		*endian_check++ = 0xAA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		*endian_check++ = 0xBB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		*endian_check++ = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		*endian_check++ = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		*endian_check++ = 0xCC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		*endian_check++ = 0xDD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		*endian_check = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	be_dws_cpu_to_le(wrb, sizeof(*wrb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			    "BC_%d : special WRB message failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) int beiscsi_init_sliport(struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	/* check POST stage before talking to FW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	status = beiscsi_check_fw_rdy(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	if (!status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	/* clear all error states after checking FW rdy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	phba->state &= ~BEISCSI_HBA_IN_ERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	/* UER support is checked again later; clear the flag until then */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	clear_bit(BEISCSI_HBA_UER_SUPP, &phba->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	 * SLI COMMON_FUNCTION_RESET completion is indicated by BMBX RDY bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	 * It should clean up any stale info in FW for this fn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	status = beiscsi_cmd_function_reset(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 			    "BC_%d : SLI Function Reset failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	/* indicate driver is loading */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	return beiscsi_cmd_special_wrb(&phba->ctrl, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)  * beiscsi_cmd_iscsi_cleanup() - Inform FW to clean up EP data structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)  * @phba: pointer to dev priv structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)  * @ulp: ULP number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)  *	0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)  *	Non-zero value on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) int beiscsi_cmd_iscsi_cleanup(struct beiscsi_hba *phba, unsigned short ulp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	struct be_ctrl_info *ctrl = &phba->ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	struct iscsi_cleanup_req_v1 *req_v1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	struct iscsi_cleanup_req *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	u16 hdr_ring_id, data_ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	struct be_mcc_wrb *wrb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	mutex_lock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	wrb = wrb_from_mbox(&ctrl->mbox_mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	hdr_ring_id = HWI_GET_DEF_HDRQ_ID(phba, ulp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	data_ring_id = HWI_GET_DEF_BUFQ_ID(phba, ulp);
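	/*
	 * BE2/BE3-R takes the v0 request with 8-bit ring ids; later chips
	 * take the v1 request with 16-bit little-endian ring ids.
	 */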
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	if (is_chip_be2_be3r(phba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		req = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 				   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		req->chute = (1 << ulp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		/* BE2/BE3 FW creates 8-bit ring id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		req->hdr_ring_id = hdr_ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		req->data_ring_id = data_ring_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		req_v1 = embedded_payload(wrb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		be_wrb_hdr_prepare(wrb, sizeof(*req_v1), true, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		be_cmd_hdr_prepare(&req_v1->hdr, CMD_SUBSYSTEM_ISCSI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 				   OPCODE_COMMON_ISCSI_CLEANUP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 				   sizeof(*req_v1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		req_v1->hdr.version = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 		req_v1->chute = (1 << ulp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		req_v1->hdr_ring_id = cpu_to_le16(hdr_ring_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		req_v1->data_ring_id = cpu_to_le16(data_ring_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	status = be_mbox_notify(ctrl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 	if (status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			    "BG_%d : %s failed %d\n", __func__, ulp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	mutex_unlock(&ctrl->mbox_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785)  * beiscsi_detect_ue() - Detect an Unrecoverable Error (UE) on the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)  * @phba: Driver priv structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)  * Read the UE status and mask registers and report any unmasked UE bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) int beiscsi_detect_ue(struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	uint32_t ue_hi = 0, ue_lo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	uint8_t i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	pci_read_config_dword(phba->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 			      PCICFG_UE_STATUS_LOW, &ue_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	pci_read_config_dword(phba->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			      PCICFG_UE_STATUS_MASK_LOW,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			      &ue_mask_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	pci_read_config_dword(phba->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 			      PCICFG_UE_STATUS_HIGH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			      &ue_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	pci_read_config_dword(phba->pcidev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 			      PCICFG_UE_STATUS_MASK_HI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			      &ue_mask_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
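	/* ignore bits flagged in the mask registers; anything left is a UE */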
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	ue_lo = (ue_lo & ~ue_mask_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	ue_hi = (ue_hi & ~ue_mask_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	if (ue_lo || ue_hi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		set_bit(BEISCSI_HBA_IN_UE, &phba->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		__beiscsi_log(phba, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 			      "BC_%d : HBA error detected\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	if (ue_lo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 			if (ue_lo & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 				__beiscsi_log(phba, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 					      "BC_%d : UE_LOW %s bit set\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 					      desc_ue_status_low[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	if (ue_hi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 			if (ue_hi & 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 				__beiscsi_log(phba, KERN_ERR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 					      "BC_%d : UE_HIGH %s bit set\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 					      desc_ue_status_hi[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)  * beiscsi_detect_tpe() - Detect Transient Parity Error on adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)  * @phba: Driver priv structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)  * Read the SLIPORT SEMAPHORE register to check whether the error is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)  * recoverable (TPE) or unrecoverable (UE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) int beiscsi_detect_tpe(struct beiscsi_hba *phba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	u32 post, status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 
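	/*
	 * A recoverable-error code in the POST stage means the HBA hit a
	 * transient parity error (TPE); anything else here is treated as
	 * an unrecoverable error (UE).
	 */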
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	post = beiscsi_get_post_stage(phba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	status = post & POST_STAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	if ((status & POST_ERR_RECOVERY_CODE_MASK) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	    POST_STAGE_RECOVERABLE_ERR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		set_bit(BEISCSI_HBA_IN_TPE, &phba->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		__beiscsi_log(phba, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 			      "BC_%d : HBA error recoverable: 0x%x\n", post);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 		ret = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		__beiscsi_log(phba, KERN_INFO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			      "BC_%d : HBA in UE: 0x%x\n", post);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }