/* bnx2fc_tgt.c: QLogic Linux FCoE offload driver.
 * Handles operations such as session offload/upload etc, and manages
 * session resources such as connection id and qp resources.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

static void bnx2fc_upld_timer(struct timer_list *t);
static void bnx2fc_ofld_timer(struct timer_list *t);
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata);
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
				struct bnx2fc_rport *tgt);
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt);
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt);
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);

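/**
 * bnx2fc_upld_timer - session upload timeout handler
 * @t: timer list entry embedded in the bnx2fc_rport being uploaded
 *
 * Fires when a disable/destroy completion is not received from the
 * firmware within BNX2FC_FW_TIMEOUT. Fakes the completion so that the
 * waiter in bnx2fc_upld_wait() can make forward progress.
 */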
static void bnx2fc_upld_timer(struct timer_list *t)
{
	struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);

	BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
	/* fake upload completion */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
	set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	wake_up_interruptible(&tgt->upld_wait);
}

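/**
 * bnx2fc_ofld_timer - session offload/enable timeout handler
 * @t: timer list entry embedded in the bnx2fc_rport being offloaded
 *
 * Fires when an offload or enable completion is not received from the
 * firmware within BNX2FC_FW_TIMEOUT. Marks the request complete with
 * the OFFLOADED/ENABLED flags cleared, so the failure is handled in
 * bnx2fc_offload_session().
 */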
static void bnx2fc_ofld_timer(struct timer_list *t)
{
	struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);

	BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
	/* NOTE: This function should never be called, as
	 * offload should never time out
	 */
	/*
	 * If the timer has expired, this session is dead.
	 * Clear the offloaded flag and log out of this device.
	 * Since the OFFLOADED flag is cleared, this case
	 * will be considered as an offload error and the
	 * port will be logged off, and conn_id, session
	 * resources are freed up in bnx2fc_offload_session
	 */
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
	set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	wake_up_interruptible(&tgt->ofld_wait);
}

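/*
 * Arm the offload timer and block until the firmware posts an
 * offload/enable completion, or until bnx2fc_ofld_timer() fakes one.
 */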
static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
{
	timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
	mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);

	wait_event_interruptible(tgt->ofld_wait,
				 test_bit(BNX2FC_FLAG_OFLD_REQ_CMPL,
					  &tgt->flags));
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&tgt->ofld_timer);
}

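/**
 * bnx2fc_offload_session - initiate an FCoE session offload to the hba
 * @port: fcoe_port the session belongs to
 * @tgt: bnx2fc_rport to be offloaded (already zeroed by the caller)
 * @rdata: libfc remote port private data for the target
 *
 * Allocates a conn_id and qp resources, then issues the offload and
 * enable requests to the firmware, retrying the offload a few times on
 * context allocation failures. On any error the session resources are
 * freed and the rport is logged off.
 */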
static void bnx2fc_offload_session(struct fcoe_port *port,
				   struct bnx2fc_rport *tgt,
				   struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	int rval;
	int i = 0;

	/* Initialize bnx2fc_rport */
	/* NOTE: tgt is already bzero'd */
	rval = bnx2fc_init_tgt(tgt, port, rdata);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate conn id for "
		       "port_id (%6x)\n", rport->port_id);
		goto tgt_init_err;
	}

	/* Allocate session resources */
	rval = bnx2fc_alloc_session_resc(hba, tgt);
	if (rval) {
		printk(KERN_ERR PFX "Failed to allocate resources\n");
		goto ofld_err;
	}

	/*
	 * Initialize the FCoE session offload process.
	 * Upon completion of the offload process, add the
	 * rport to the list of rports.
	 */
retry_ofld:
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_ofld_req(port, tgt);
	if (rval) {
		printk(KERN_ERR PFX "ofld_req failed\n");
		goto ofld_err;
	}

	/*
	 * Wait for the session to be offloaded and enabled. 3 secs
	 * should be ample time for this process to complete.
	 */
	bnx2fc_ofld_wait(tgt);

	if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
		if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
				       &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "ctx_alloc_failure, "
				       "retry ofld..%d\n", i++);
			msleep_interruptible(1000);
			if (i > 3) {
				i = 0;
				goto ofld_err;
			}
			goto retry_ofld;
		}
		goto ofld_err;
	}
	if (bnx2fc_map_doorbell(tgt)) {
		printk(KERN_ERR PFX "map doorbell failed - no mem\n");
		goto ofld_err;
	}
	clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
	rval = bnx2fc_send_session_enable_req(port, tgt);
	if (rval) {
		pr_err(PFX "enable session failed\n");
		goto ofld_err;
	}
	bnx2fc_ofld_wait(tgt);
	if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
		goto ofld_err;
	return;

ofld_err:
	/* couldn't offload the session. log off from this rport */
	BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
	clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
tgt_init_err:
	if (tgt->fcoe_conn_id != -1)
		bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
	fc_rport_logoff(rdata);
}

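/**
 * bnx2fc_flush_active_ios - clean up all IOs pending on an offloaded session
 * @tgt: bnx2fc_rport whose queues are to be flushed
 *
 * Walks the active command, TM, ELS and IO retire queues under tgt_lock,
 * cancelling timers and issuing cleanup tasks, then waits (bounded by
 * BNX2FC_WAIT_CNT) for num_active_ios to drain to zero.
 */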
void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
{
	struct bnx2fc_cmd *io_req;
	struct bnx2fc_cmd *tmp;
	int rc;
	int i = 0;

	BNX2FC_TGT_DBG(tgt, "Entered flush_active_ios - %d\n",
		       tgt->num_active_ios.counter);

	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 1;

	list_for_each_entry_safe(io_req, tmp, &tgt->active_cmd_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;
		BNX2FC_IO_DBG(io_req, "cmd_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "cleaned up\n");
				complete(&io_req->abts_done);
			}
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */
		}

		set_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags);
		set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);

		/* Do not issue cleanup when disable request failed */
		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
		} else {
			rc = bnx2fc_initiate_cleanup(io_req);
			BUG_ON(rc);
		}
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->active_tm_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_tmf_queue = 0;
		BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
		if (io_req->wait_for_abts_comp)
			complete(&io_req->abts_done);
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->els_queue, link) {
		i++;
		list_del_init(&io_req->link);
		io_req->on_active_queue = 0;

		BNX2FC_IO_DBG(io_req, "els_queue cleanup\n");

		if (cancel_delayed_work(&io_req->timeout_work))
			kref_put(&io_req->refcount,
				 bnx2fc_cmd_release); /* drop timer hold */

		if ((io_req->cb_func) && (io_req->cb_arg)) {
			io_req->cb_func(io_req->cb_arg);
			io_req->cb_arg = NULL;
		}

		/* Do not issue cleanup when disable request failed */
		if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
			bnx2fc_process_cleanup_compl(io_req, io_req->task, 0);
		} else {
			rc = bnx2fc_initiate_cleanup(io_req);
			BUG_ON(rc);
		}
	}

	list_for_each_entry_safe(io_req, tmp, &tgt->io_retire_queue, link) {
		i++;
		list_del_init(&io_req->link);

		BNX2FC_IO_DBG(io_req, "retire_queue flush\n");

		if (cancel_delayed_work(&io_req->timeout_work)) {
			if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
					       &io_req->req_flags)) {
				/* Handle eh_abort timeout */
				BNX2FC_IO_DBG(io_req, "eh_abort for IO "
					      "in retire_q\n");
				if (io_req->wait_for_abts_comp)
					complete(&io_req->abts_done);
			}
			kref_put(&io_req->refcount, bnx2fc_cmd_release);
		}

		clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
	}

	BNX2FC_TGT_DBG(tgt, "IOs flushed = %d\n", i);
	i = 0;
	spin_unlock_bh(&tgt->tgt_lock);
	/* wait for active_ios to go to 0 */
	while ((tgt->num_active_ios.counter != 0) && (i++ < BNX2FC_WAIT_CNT))
		msleep(25);
	if (tgt->num_active_ios.counter != 0)
		printk(KERN_ERR PFX "CLEANUP on port 0x%x:"
		       " active_ios = %d\n",
		       tgt->rdata->ids.port_id, tgt->num_active_ios.counter);
	spin_lock_bh(&tgt->tgt_lock);
	tgt->flush_in_prog = 0;
	spin_unlock_bh(&tgt->tgt_lock);
}

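/*
 * Arm the upload timer and block until the firmware posts a
 * disable/destroy completion, or until bnx2fc_upld_timer() fakes one.
 */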
static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
{
	timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
	mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
	wait_event_interruptible(tgt->upld_wait,
				 test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL,
					  &tgt->flags));
	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&tgt->upld_timer);
}

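/**
 * bnx2fc_upload_session - tear down an offloaded FCoE session
 * @port: fcoe_port the session belongs to
 * @tgt: bnx2fc_rport to be uploaded
 *
 * Sends the disable request, flushes all active IOs, and, if the
 * disable completed, sends the destroy request. Finally frees the qp
 * resources and releases the conn_id. Called with hba->hba_mutex held.
 */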
static void bnx2fc_upload_session(struct fcoe_port *port,
				  struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;

	BNX2FC_TGT_DBG(tgt, "upload_session: active_ios = %d\n",
		       tgt->num_active_ios.counter);

	/*
	 * Called with hba->hba_mutex held.
	 * This is a blocking call.
	 */
	clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
	bnx2fc_send_session_disable_req(port, tgt);

	/*
	 * Wait for the upload to complete. 3 secs
	 * should be sufficient time for this process to complete.
	 */
	BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
	bnx2fc_upld_wait(tgt);

	/*
	 * Traverse through the active_q and tmf_q and clean up
	 * the IOs on these lists.
	 */
	BNX2FC_TGT_DBG(tgt, "flush/upload - disable wait flags = 0x%lx\n",
		       tgt->flags);
	bnx2fc_flush_active_ios(tgt);

	/* Issue destroy KWQE */
	if (test_bit(BNX2FC_FLAG_DISABLED, &tgt->flags)) {
		BNX2FC_TGT_DBG(tgt, "send destroy req\n");
		clear_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
		bnx2fc_send_session_destroy_req(hba, tgt);

		/* wait for destroy to complete */
		bnx2fc_upld_wait(tgt);

		if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
			printk(KERN_ERR PFX "ERROR!! destroy timed out\n");

		BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
			       tgt->flags);

	} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
		printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
		       " not sent to FW\n");
	} else {
		printk(KERN_ERR PFX "ERROR!! DISABLE req timed out, destroy"
		       " not sent to FW\n");
	}

	/* Free session resources */
	bnx2fc_free_session_resc(hba, tgt);
	bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id);
}

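/**
 * bnx2fc_init_tgt - initialize a bnx2fc_rport prior to offload
 * @tgt: bnx2fc_rport to initialize
 * @port: fcoe_port the session belongs to
 * @rdata: libfc remote port private data for the target
 *
 * Allocates a conn_id and sets up queue sizes, doorbell templates,
 * per-session locks, lists and wait queues. Returns 0 on success, or
 * -1 if the session limit is reached or no conn_id is available.
 */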
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
			   struct fcoe_port *port,
			   struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = rdata->rport;
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
	struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;

	tgt->rport = rport;
	tgt->rdata = rdata;
	tgt->port = port;

	if (hba->num_ofld_sess >= BNX2FC_NUM_MAX_SESS) {
		BNX2FC_TGT_DBG(tgt, "exceeded max sessions. logoff this tgt\n");
		tgt->fcoe_conn_id = -1;
		return -1;
	}

	tgt->fcoe_conn_id = bnx2fc_alloc_conn_id(hba, tgt);
	if (tgt->fcoe_conn_id == -1)
		return -1;

	BNX2FC_TGT_DBG(tgt, "init_tgt - conn_id = 0x%x\n", tgt->fcoe_conn_id);

	tgt->max_sqes = BNX2FC_SQ_WQES_MAX;
	tgt->max_rqes = BNX2FC_RQ_WQES_MAX;
	tgt->max_cqes = BNX2FC_CQ_WQES_MAX;
	atomic_set(&tgt->free_sqes, BNX2FC_SQ_WQES_MAX);

	/* Initialize the toggle bit */
	tgt->sq_curr_toggle_bit = 1;
	tgt->cq_curr_toggle_bit = 1;
	tgt->sq_prod_idx = 0;
	tgt->cq_cons_idx = 0;
	tgt->rq_prod_idx = 0x8000;
	tgt->rq_cons_idx = 0;
	atomic_set(&tgt->num_active_ios, 0);
	tgt->retry_delay_timestamp = 0;

	if (rdata->flags & FC_RP_FLAGS_RETRY &&
	    rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
	    !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
		tgt->dev_type = TYPE_TAPE;
		tgt->io_timeout = 0; /* use default ULP timeout */
	} else {
		tgt->dev_type = TYPE_DISK;
		tgt->io_timeout = BNX2FC_IO_TIMEOUT;
	}

	/* initialize sq doorbell */
	sq_db->header.header = B577XX_DOORBELL_HDR_DB_TYPE;
	sq_db->header.header |= B577XX_FCOE_CONNECTION_TYPE <<
				B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
	/* initialize rx doorbell */
	rx_db->hdr.header = ((0x1 << B577XX_DOORBELL_HDR_RX_SHIFT) |
			     (0x1 << B577XX_DOORBELL_HDR_DB_TYPE_SHIFT) |
			     (B577XX_FCOE_CONNECTION_TYPE <<
			      B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT));
	rx_db->params = (0x2 << B577XX_FCOE_RX_DOORBELL_NEGATIVE_ARM_SHIFT) |
			(0x3 << B577XX_FCOE_RX_DOORBELL_OPCODE_SHIFT);

	spin_lock_init(&tgt->tgt_lock);
	spin_lock_init(&tgt->cq_lock);

	/* Initialize active_cmd_queue list */
	INIT_LIST_HEAD(&tgt->active_cmd_queue);

	/* Initialize IO retire queue */
	INIT_LIST_HEAD(&tgt->io_retire_queue);

	INIT_LIST_HEAD(&tgt->els_queue);

	/* Initialize active_tm_queue list */
	INIT_LIST_HEAD(&tgt->active_tm_queue);

	init_waitqueue_head(&tgt->ofld_wait);
	init_waitqueue_head(&tgt->upld_wait);

	return 0;
}

/*
 * This event_callback is called after successful completion of a
 * libfc-initiated target login. bnx2fc can proceed with initiating
 * the session establishment.
 */
void bnx2fc_rport_event_handler(struct fc_lport *lport,
				struct fc_rport_priv *rdata,
				enum fc_rport_event event)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct fc_rport *rport = rdata->rport;
	struct fc_rport_libfc_priv *rp;
	struct bnx2fc_rport *tgt;
	u32 port_id;

	BNX2FC_HBA_DBG(lport, "rport_event_hdlr: event = %d, port_id = 0x%x\n",
		       event, rdata->ids.port_id);
	switch (event) {
	case RPORT_EV_READY:
		if (!rport) {
			printk(KERN_ERR PFX "rport is NULL: ERROR!\n");
			break;
		}

		rp = rport->dd_data;
		if (rport->port_id == FC_FID_DIR_SERV) {
			/*
			 * A bnx2fc_rport structure doesn't exist for
			 * the directory server.
			 * We should not come here, as lport will
			 * take care of fabric login
			 */
			printk(KERN_ERR PFX "%x - rport_event_handler ERROR\n",
			       rdata->ids.port_id);
			break;
		}

		if (rdata->spp_type != FC_TYPE_FCP) {
			BNX2FC_HBA_DBG(lport, "not FCP type target."
				       " not offloading\n");
			break;
		}
		if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
			BNX2FC_HBA_DBG(lport, "not FCP_TARGET"
				       " not offloading\n");
			break;
		}

		/*
		 * The offload process is protected with the hba mutex.
		 * Use the same mutex for the upload process too.
		 */
		mutex_lock(&hba->hba_mutex);
		tgt = (struct bnx2fc_rport *)&rp[1];

		/* This can happen when ADISC finds the same target */
		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
			BNX2FC_TGT_DBG(tgt, "already offloaded\n");
			mutex_unlock(&hba->hba_mutex);
			return;
		}

		/*
		 * Offload the session. This is a blocking call, and will
		 * wait until the session is offloaded.
		 */
		bnx2fc_offload_session(port, tgt, rdata);

		BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
			       hba->num_ofld_sess);

		if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
			/* Session is offloaded and enabled. */
			BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
			/* This counter is protected with hba mutex */
			hba->num_ofld_sess++;

			set_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
		} else {
			/*
			 * Offload or enable would have failed.
			 * In the offload/enable completion path, the
			 * rport would have already been removed.
			 */
			BNX2FC_TGT_DBG(tgt, "Port is being logged off as "
				       "offloaded flag not set\n");
		}
		mutex_unlock(&hba->hba_mutex);
		break;
	case RPORT_EV_LOGO:
	case RPORT_EV_FAILED:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		if (port_id == FC_FID_DIR_SERV)
			break;

		if (!rport) {
			printk(KERN_INFO PFX "%x - rport not created yet!!\n",
			       port_id);
			break;
		}
		rp = rport->dd_data;
		mutex_lock(&hba->hba_mutex);
		/*
		 * Perform session upload. Note that rdata->peers is already
		 * removed from the disc->rports list before we get this event.
		 */
		tgt = (struct bnx2fc_rport *)&rp[1];

		if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
			mutex_unlock(&hba->hba_mutex);
			break;
		}
		clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);

		bnx2fc_upload_session(port, tgt);
		hba->num_ofld_sess--;
		BNX2FC_TGT_DBG(tgt, "UPLOAD num_ofld_sess = %d\n",
			       hba->num_ofld_sess);
		/*
		 * Try to wake up the linkdown wait thread. If num_ofld_sess
		 * is 0, the waiting thread wakes up.
		 */
		if ((hba->wait_for_link_down) &&
		    (hba->num_ofld_sess == 0)) {
			wake_up_interruptible(&hba->shutdown_wait);
		}
		mutex_unlock(&hba->hba_mutex);

		break;

	case RPORT_EV_NONE:
		break;
	}
}

/**
 * bnx2fc_tgt_lookup() - Lookup a bnx2fc_rport by port_id
 *
 * @port: fcoe_port struct to lookup the target port on
 * @port_id: The remote port ID to look up
 */
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
				       u32 port_id)
{
	struct bnx2fc_interface *interface = port->priv;
	struct bnx2fc_hba *hba = interface->hba;
	struct bnx2fc_rport *tgt;
	struct fc_rport_priv *rdata;
	int i;

	for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
		tgt = hba->tgt_ofld_list[i];
		if ((tgt) && (tgt->port == port)) {
			rdata = tgt->rdata;
			if (rdata->ids.port_id == port_id) {
				if (rdata->rp_state != RPORT_ST_DELETE) {
					BNX2FC_TGT_DBG(tgt, "rport "
						       "obtained\n");
					return tgt;
				} else {
					BNX2FC_TGT_DBG(tgt, "rport 0x%x "
						       "is in DELETED state\n",
						       rdata->ids.port_id);
					return NULL;
				}
			}
		}
	}
	return NULL;
}
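
/*
 * Illustrative usage sketch (the actual call sites live outside this
 * file, e.g. in the transmit path): callers resolve the offloaded
 * session by destination FC_ID and check that it is ready before
 * posting work on it:
 *
 *	tgt = bnx2fc_tgt_lookup(port, ntoh24(fh->fh_d_id));
 *	if (!tgt || !test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
 *		... fall back to the non-offloaded path ...
 */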

/**
 * bnx2fc_alloc_conn_id - allocates an FCoE connection id
 *
 * @hba: pointer to adapter structure
 * @tgt: pointer to bnx2fc_rport structure
 */
static u32 bnx2fc_alloc_conn_id(struct bnx2fc_hba *hba,
				struct bnx2fc_rport *tgt)
{
	u32 conn_id, next;

	/* called with hba mutex held */

	/*
	 * tgt_ofld_list access is synchronized using
	 * both the hba mutex and the hba lock. At least the hba
	 * mutex or the hba lock needs to be held for read access.
	 */

	spin_lock_bh(&hba->hba_lock);
	next = hba->next_conn_id;
	conn_id = hba->next_conn_id++;
	if (hba->next_conn_id == BNX2FC_NUM_MAX_SESS)
		hba->next_conn_id = 0;

	while (hba->tgt_ofld_list[conn_id] != NULL) {
		conn_id++;
		if (conn_id == BNX2FC_NUM_MAX_SESS)
			conn_id = 0;

		if (conn_id == next) {
			/* No free conn_ids are available */
			spin_unlock_bh(&hba->hba_lock);
			return -1;
		}
	}
	hba->tgt_ofld_list[conn_id] = tgt;
	tgt->fcoe_conn_id = conn_id;
	spin_unlock_bh(&hba->hba_lock);
	return conn_id;
}

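/**
 * bnx2fc_free_conn_id - release an FCoE connection id
 *
 * @hba: pointer to adapter structure
 * @conn_id: connection id to remove from tgt_ofld_list
 */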
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id)
{
	/* called with hba mutex held */
	spin_lock_bh(&hba->hba_lock);
	hba->tgt_ofld_list[conn_id] = NULL;
	spin_unlock_bh(&hba->hba_lock);
}

/*
 * bnx2fc_alloc_session_resc - Allocate qp resources for the session
 */
static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
				     struct bnx2fc_rport *tgt)
{
	dma_addr_t page;
	int num_pages;
	u32 *pbl;

	/* Allocate and map SQ */
	tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
	tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				     &tgt->sq_dma, GFP_KERNEL);
	if (!tgt->sq) {
		printk(KERN_ERR PFX "unable to allocate SQ memory %d\n",
		       tgt->sq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map CQ */
	tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
	tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				     &tgt->cq_dma, GFP_KERNEL);
	if (!tgt->cq) {
		printk(KERN_ERR PFX "unable to allocate CQ memory %d\n",
		       tgt->cq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map RQ and RQ PBL */
	tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
	tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				     &tgt->rq_dma, GFP_KERNEL);
	if (!tgt->rq) {
		printk(KERN_ERR PFX "unable to allocate RQ memory %d\n",
		       tgt->rq_mem_size);
		goto mem_alloc_failure;
	}

	tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
			   CNIC_PAGE_MASK;

	tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
					 &tgt->rq_pbl_dma, GFP_KERNEL);
	if (!tgt->rq_pbl) {
		printk(KERN_ERR PFX "unable to allocate RQ PBL %d\n",
		       tgt->rq_pbl_size);
		goto mem_alloc_failure;
	}

	num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
	page = tgt->rq_dma;
	pbl = (u32 *)tgt->rq_pbl;

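	/*
	 * Populate the RQ page-buffer list: each CNIC page address is
	 * written as two consecutive 32-bit PBL entries, low dword
	 * first, then high dword.
	 */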
	while (num_pages--) {
		*pbl = (u32)page;
		pbl++;
		*pbl = (u32)((u64)page >> 32);
		pbl++;
		page += CNIC_PAGE_SIZE;
	}

	/* Allocate and map XFERQ */
	tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
	tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			      CNIC_PAGE_MASK;

	tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev,
					tgt->xferq_mem_size, &tgt->xferq_dma,
					GFP_KERNEL);
	if (!tgt->xferq) {
		printk(KERN_ERR PFX "unable to allocate XFERQ %d\n",
		       tgt->xferq_mem_size);
		goto mem_alloc_failure;
	}

	/* Allocate and map CONFQ & CONFQ PBL */
	tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
	tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			      CNIC_PAGE_MASK;

	tgt->confq = dma_alloc_coherent(&hba->pcidev->dev,
					tgt->confq_mem_size, &tgt->confq_dma,
					GFP_KERNEL);
	if (!tgt->confq) {
		printk(KERN_ERR PFX "unable to allocate CONFQ %d\n",
		       tgt->confq_mem_size);
		goto mem_alloc_failure;
	}

	tgt->confq_pbl_size =
		(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
	tgt->confq_pbl_size =
		(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;

	tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
					    tgt->confq_pbl_size,
					    &tgt->confq_pbl_dma, GFP_KERNEL);
	if (!tgt->confq_pbl) {
		printk(KERN_ERR PFX "unable to allocate CONFQ PBL %d\n",
		       tgt->confq_pbl_size);
		goto mem_alloc_failure;
	}

	num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
	page = tgt->confq_dma;
	pbl = (u32 *)tgt->confq_pbl;

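	/* Populate the CONFQ PBL using the same low/high dword layout */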
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) while (num_pages--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) *pbl = (u32)page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) pbl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) *pbl = (u32)((u64)page >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) pbl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) page += CNIC_PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785)
	/* Allocate and map ConnDB */
	tgt->conn_db_mem_size = sizeof(struct fcoe_conn_db);

	tgt->conn_db = dma_alloc_coherent(&hba->pcidev->dev,
					  tgt->conn_db_mem_size,
					  &tgt->conn_db_dma, GFP_KERNEL);
	if (!tgt->conn_db) {
		printk(KERN_ERR PFX "unable to allocate conn_db %d\n",
			tgt->conn_db_mem_size);
		goto mem_alloc_failure;
	}

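	/*
	 * The LCQ is sized to the SQ depth plus a little headroom (eight
	 * extra WQE-sized slots), then page-rounded like the other queues.
	 */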
	/* Allocate and map LCQ */
	tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
	tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
			     CNIC_PAGE_MASK;

	tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				      &tgt->lcq_dma, GFP_KERNEL);
	if (!tgt->lcq) {
		printk(KERN_ERR PFX "unable to allocate lcq %d\n",
			tgt->lcq_mem_size);
		goto mem_alloc_failure;
	}

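	/*
	 * Seed the RQ producer index with the non-zero initial value the
	 * firmware expects; the top bit presumably serves as a wrap/arm
	 * marker rather than part of the ring index itself (assumption
	 * based on how the index is used, not documented here).
	 */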
	tgt->conn_db->rq_prod = 0x8000;

	return 0;

mem_alloc_failure:
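	/*
	 * Nothing is unwound here: the caller's error path is expected to
	 * release any partial allocations via bnx2fc_free_session_resc().
	 */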
	return -ENOMEM;
}

/**
 * bnx2fc_free_session_resc - free qp resources for the session
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 *
 * Free QP resources - SQ/CQ/RQ/XFERQ/CONFQ/LCQ memory, PBLs and conn DB
 */
static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba,
						struct bnx2fc_rport *tgt)
{
	void __iomem *ctx_base_ptr;

	BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n");

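	/*
	 * Hold cq_lock while tearing the queues down so this cannot race
	 * with CQ processing.  ctx_base is snapshotted and cleared under
	 * the lock, but the iounmap() itself is deferred until after the
	 * unlock, since it may sleep and must not run in atomic context.
	 */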
	spin_lock_bh(&tgt->cq_lock);
	ctx_base_ptr = tgt->ctx_base;
	tgt->ctx_base = NULL;

	/* Free LCQ */
	if (tgt->lcq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
				  tgt->lcq, tgt->lcq_dma);
		tgt->lcq = NULL;
	}
	/* Free connDB */
	if (tgt->conn_db) {
		dma_free_coherent(&hba->pcidev->dev, tgt->conn_db_mem_size,
				  tgt->conn_db, tgt->conn_db_dma);
		tgt->conn_db = NULL;
	}
	/* Free confq and confq pbl */
	if (tgt->confq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_pbl_size,
				  tgt->confq_pbl, tgt->confq_pbl_dma);
		tgt->confq_pbl = NULL;
	}
	if (tgt->confq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
				  tgt->confq, tgt->confq_dma);
		tgt->confq = NULL;
	}
	/* Free XFERQ */
	if (tgt->xferq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
				  tgt->xferq, tgt->xferq_dma);
		tgt->xferq = NULL;
	}
	/* Free RQ PBL and RQ */
	if (tgt->rq_pbl) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
				  tgt->rq_pbl, tgt->rq_pbl_dma);
		tgt->rq_pbl = NULL;
	}
	if (tgt->rq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
				  tgt->rq, tgt->rq_dma);
		tgt->rq = NULL;
	}
	/* Free CQ */
	if (tgt->cq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
				  tgt->cq, tgt->cq_dma);
		tgt->cq = NULL;
	}
	/* Free SQ */
	if (tgt->sq) {
		dma_free_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
				  tgt->sq, tgt->sq_dma);
		tgt->sq = NULL;
	}
	spin_unlock_bh(&tgt->cq_lock);

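	/* Safe to unmap now that the lock is dropped (iounmap() may sleep). */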
	if (ctx_base_ptr)
		iounmap(ctx_base_ptr);
}