// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
#include "qla_def.h"
#include "qla_gbl.h"
#include "qla_target.h"

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <linux/delay.h>

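/*
 * Stop the per-vport timer. Only virtual ports (vp_idx != 0) with an
 * active timer are affected; the base port's timer is left running.
 */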
void
qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
{
	if (vha->vp_idx && vha->timer_active) {
		del_timer_sync(&vha->timer);
		vha->timer_active = 0;
	}
}

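/*
 * Allocate a vp index for a new vport: claim a free bit in ha->vp_idx_map
 * under vport_lock, add the vport to ha->vp_list and register it in the
 * target-mode vp map. Returns a value greater than ha->max_npiv_vports
 * when no free index is available.
 */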
static uint32_t
qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
{
	uint32_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;

	/* Find an empty slot and assign a vp_id */
	mutex_lock(&ha->vport_lock);
	vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
	if (vp_id > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa000,
		    "vp_id %d is bigger than max-supported %d.\n",
		    vp_id, ha->max_npiv_vports);
		mutex_unlock(&ha->vport_lock);
		return vp_id;
	}

	set_bit(vp_id, ha->vp_idx_map);
	ha->num_vhosts++;
	vha->vp_idx = vp_id;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_add_tail(&vha->list, &ha->vp_list);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qlt_update_vp_map(vha, SET_VP_IDX);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	mutex_unlock(&ha->vport_lock);
	return vp_id;
}

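/*
 * Release a vport's vp index: wait for outstanding references to drain,
 * unlink the vport from ha->vp_list, reset its target-mode vp map entry
 * and clear its bit in ha->vp_idx_map.
 */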
void
qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
{
	uint16_t vp_id;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags = 0;
	u8 i;

	mutex_lock(&ha->vport_lock);
	/*
	 * Wait for all pending activities to finish before removing vport from
	 * the list.
	 * Lock needs to be held for safe removal from the list (it
	 * ensures no active vp_list traversal while the vport is removed
	 * from the queue)
	 */
	for (i = 0; i < 10; i++) {
		if (wait_event_timeout(vha->vref_waitq,
		    !atomic_read(&vha->vref_count), HZ) > 0)
			break;
	}

	spin_lock_irqsave(&ha->vport_slock, flags);
	if (atomic_read(&vha->vref_count)) {
		ql_dbg(ql_dbg_vport, vha, 0xfffa,
		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
	}
	list_del(&vha->list);
	qlt_update_vp_map(vha, RESET_VP_IDX);
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	vp_id = vha->vp_idx;
	ha->num_vhosts--;
	clear_bit(vp_id, ha->vp_idx_map);

	mutex_unlock(&ha->vport_lock);
}

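/* Look up a vport in ha->vp_list by WWPN; returns NULL if none matches. */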
static scsi_qla_host_t *
qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
{
	scsi_qla_host_t *vha;
	struct scsi_qla_host *tvha;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	/* Locate matching device in database. */
	list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
		if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
			spin_unlock_irqrestore(&ha->vport_slock, flags);
			return vha;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
	return NULL;
}

/*
 * qla2x00_mark_vp_devices_dead
 *	Updates fcport state when device goes offline.
 *
 * Input:
 *	vha = virtual host pointer.
 *
 * Return:
 *	None.
 *
 * Context:
 */
static void
qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
{
	/*
	 * !!! NOTE !!!
	 * If this function is called from contexts other than vp create,
	 * disable, or delete, make sure it is synchronized with the vport
	 * delete thread.
	 */
	fc_port_t *fcport;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		ql_dbg(ql_dbg_vport, vha, 0xa001,
		    "Marking port dead, loop_id=0x%04x : %x.\n",
		    fcport->loop_id, fcport->vha->vp_idx);

		qla2x00_mark_device_lost(vha, fcport, 0);
		qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
	}
}

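/*
 * Disable a vport: log out its sessions via the VP control IOCB (when
 * firmware is running), mark all of its devices lost and report the new
 * state to the FC transport. Returns 0 on success, -1 otherwise.
 */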
int
qla24xx_disable_vp(scsi_qla_host_t *vha)
{
	unsigned long flags;
	int ret = QLA_SUCCESS;
	fc_port_t *fcport;

	if (vha->hw->flags.fw_started)
		ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	list_for_each_entry(fcport, &vha->vp_fcports, list)
		fcport->logout_on_delete = 0;

	qla2x00_mark_all_devices_lost(vha);

	/* Remove port id from vp target map */
	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_update_vp_map(vha, RESET_AL_PA);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	qla2x00_mark_vp_devices_dead(vha);
	atomic_set(&vha->vp_state, VP_FAILED);
	vha->flags.management_server_logged_in = 0;
	if (ret == QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
	} else {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		return -1;
	}
	return 0;
}

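/*
 * Enable a vport: bail out if the physical port's link is down or the
 * topology is not a switched fabric, otherwise push the vport
 * configuration to the firmware. Returns 0 on success, 1 on failure.
 */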
int
qla24xx_enable_vp(scsi_qla_host_t *vha)
{
	int ret;
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Check if physical ha port is Up */
	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&base_vha->loop_state) == LOOP_DEAD ||
	    !(ha->current_topology & ISP_CFG_F)) {
		vha->vp_err_state = VP_ERR_PORTDWN;
		fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
		ql_dbg(ql_dbg_taskm, vha, 0x800b,
		    "%s skip enable. loop_state %x topo %x\n",
		    __func__, base_vha->loop_state.counter,
		    ha->current_topology);

		goto enable_failed;
	}

	/* Initialize the new vport unless it is a persistent port */
	mutex_lock(&ha->vport_lock);
	ret = qla24xx_modify_vp_config(vha);
	mutex_unlock(&ha->vport_lock);

	if (ret != QLA_SUCCESS) {
		fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		goto enable_failed;
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801a,
	    "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
	return 0;

enable_failed:
	ql_dbg(ql_dbg_taskm, vha, 0x801b,
	    "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
	return 1;
}

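/*
 * Complete configuration of a newly acquired vport: register for RSCN
 * delivery (change request #3), configure the vhba and mark the vport
 * active on success.
 */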
static void
qla24xx_configure_vp(scsi_qla_host_t *vha)
{
	struct fc_vport *fc_vport;
	int ret;

	fc_vport = vha->fc_vport;

	ql_dbg(ql_dbg_vport, vha, 0xa002,
	    "%s: change request #3.\n", __func__);
	ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
	if (ret != QLA_SUCCESS) {
		ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
		    "receiving of RSCN requests: 0x%x.\n", ret);
		return;
	} else {
		/* Corresponds to SCR enabled */
		clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
	}

	vha->flags.online = 1;
	if (qla24xx_configure_vhba(vha))
		return;

	atomic_set(&vha->vp_state, VP_ACTIVE);
	fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
}

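/*
 * Fan an asynchronous event out to the virtual ports. Loop/LIP events are
 * forwarded to every vport; port and RSCN updates only go to the vport
 * whose index matches the low byte of mb[3]. Vports being deleted are
 * skipped.
 */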
void
qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha = rsp->hw;
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vha, &ha->vp_list, list) {
		if (vha->vp_idx) {
			if (test_bit(VPORT_DELETE, &vha->dpc_flags))
				continue;

			atomic_inc(&vha->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			switch (mb[0]) {
			case MBA_LIP_OCCURRED:
			case MBA_LOOP_UP:
			case MBA_LOOP_DOWN:
			case MBA_LIP_RESET:
			case MBA_POINT_TO_POINT:
			case MBA_CHG_IN_CONNECTION:
				ql_dbg(ql_dbg_async, vha, 0x5024,
				    "Async_event for VP[%d], mb=0x%x vha=%p.\n",
				    i, *mb, vha);
				qla2x00_async_event(vha, rsp, mb);
				break;
			case MBA_PORT_UPDATE:
			case MBA_RSCN_UPDATE:
				if ((mb[3] & 0xff) == vha->vp_idx) {
					ql_dbg(ql_dbg_async, vha, 0x5024,
					    "Async_event for VP[%d], mb=0x%x vha=%p\n",
					    i, *mb, vha);
					qla2x00_async_event(vha, rsp, mb);
				}
				break;
			}

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vha->vref_count);
			wake_up(&vha->vref_waitq);
		}
		i++;
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

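/*
 * Handle an ISP abort for a vport: log the vport out (unless an ISP reset
 * is already active), treat the event as a loop down and re-enable the
 * vport.
 */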
int
qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
{
	fc_port_t *fcport;

	/*
	 * To exclusively reset vport, we need to log it out first.
	 * Note: this control_vp call can fail if an ISP reset has already
	 * been issued; this is expected, as the vport would already be
	 * logged out due to the ISP reset.
	 */
	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
		list_for_each_entry(fcport, &vha->vp_fcports, list)
			fcport->logout_on_delete = 0;
	}

	/*
	 * Physical port will do most of the abort and recovery work. We can
	 * just treat it as a loop down.
	 */
	if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
		atomic_set(&vha->loop_state, LOOP_DOWN);
		qla2x00_mark_all_devices_lost(vha);
	} else {
		if (!atomic_read(&vha->loop_down_timer))
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	}

	ql_dbg(ql_dbg_taskm, vha, 0x801d,
	    "Scheduling enable of Vport %d.\n", vha->vp_idx);

	return qla24xx_enable_vp(vha);
}

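/*
 * Per-vport DPC work: finish configuration of a newly acquired vport and
 * handle the PUREX, fcport-update, relogin, reset-marker and loop-resync
 * requests flagged in dpc_flags.
 */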
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
	    "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

	/* Check if Fw is ready to configure VP first */
	if (test_bit(VP_CONFIG_OK, &base_vha->vp_flags)) {
		if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
			/* VP acquired. complete port configuration */
			ql_dbg(ql_dbg_dpc, vha, 0x4014,
			    "Configure VP scheduled.\n");
			qla24xx_configure_vp(vha);
			ql_dbg(ql_dbg_dpc, vha, 0x4015,
			    "Configure VP end.\n");
			return 0;
		}
	}

	if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
		if (atomic_read(&vha->loop_state) == LOOP_READY) {
			qla24xx_process_purex_list(&vha->purex_list);
			clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
		}
	}

	if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
		ql_dbg(ql_dbg_dpc, vha, 0x4016,
		    "FCPort update scheduled.\n");
		qla2x00_update_fcports(vha);
		clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
		ql_dbg(ql_dbg_dpc, vha, 0x4017,
		    "FCPort update end.\n");
	}

	if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
	    !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
	    atomic_read(&vha->loop_state) != LOOP_DOWN) {

		if (!vha->relogin_jif ||
		    time_after_eq(jiffies, vha->relogin_jif)) {
			vha->relogin_jif = jiffies + HZ;
			clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);

			ql_dbg(ql_dbg_dpc, vha, 0x4018,
			    "Relogin needed scheduled.\n");
			qla24xx_post_relogin_work(vha);
		}
	}

	if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
	    (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
		clear_bit(RESET_ACTIVE, &vha->dpc_flags);
	}

	if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
		if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
			ql_dbg(ql_dbg_dpc, vha, 0x401a,
			    "Loop resync scheduled.\n");
			qla2x00_loop_resync(vha);
			clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
			ql_dbg(ql_dbg_dpc, vha, 0x401b,
			    "Loop resync end.\n");
		}
	}

	ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
	    "Exiting %s.\n", __func__);
	return 0;
}

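/*
 * Run the DPC handler for every vport on this adapter. Only the base port
 * (vp_idx == 0) walks the list, and only when operating on a switched
 * fabric.
 */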
void
qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	unsigned long flags = 0;

	if (vha->vp_idx)
		return;
	if (list_empty(&ha->vp_list))
		return;

	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);

	if (!(ha->current_topology & ISP_CFG_F))
		return;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		if (vp->vp_idx) {
			atomic_inc(&vp->vref_count);
			spin_unlock_irqrestore(&ha->vport_slock, flags);

			qla2x00_do_dpc_vp(vp);

			spin_lock_irqsave(&ha->vport_slock, flags);
			atomic_dec(&vp->vref_count);
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);
}

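/*
 * Validate an FC transport vport-create request: the new port must be an
 * FCP initiator, firmware and switch must support NPIV, the WWPN must be
 * unique, and the NPIV vport limit must not be exceeded.
 */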
int
qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	uint8_t port_name[WWN_SIZE];

	if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR)
		return VPCERR_UNSUPPORTED;

	/* Check whether the F/W and H/W support NPIV */
	if (!ha->flags.npiv_supported)
		return VPCERR_UNSUPPORTED;

	/* Check whether an NPIV-capable switch is present */
	if (!(ha->switch_cap & FLOGI_MID_SUPPORT))
		return VPCERR_NO_FABRIC_SUPP;

	/* Check for a unique WWPN */
	u64_to_wwn(fc_vport->port_name, port_name);
	if (!memcmp(port_name, base_vha->port_name, WWN_SIZE))
		return VPCERR_BAD_WWN;
	vha = qla24xx_find_vhost_by_name(ha, port_name);
	if (vha)
		return VPCERR_BAD_WWN;

	/* Check the max NPIV vport limit */
	if (ha->num_vhosts > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa004,
		    "num_vhosts %u is bigger "
		    "than max_npiv_vports %u.\n",
		    ha->num_vhosts, ha->max_npiv_vports);
		return VPCERR_UNSUPPORTED;
	}
	return 0;
}

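/*
 * Allocate and initialize a new virtual host for an FC transport vport:
 * create the SCSI host, assign a vp index, inherit queue settings from
 * the base port and start the vport timer. Returns NULL on failure.
 */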
scsi_qla_host_t *
qla24xx_create_vhost(struct fc_vport *fc_vport)
{
	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
	struct qla_hw_data *ha = base_vha->hw;
	scsi_qla_host_t *vha;
	struct scsi_host_template *sht = &qla2xxx_driver_template;
	struct Scsi_Host *host;

	vha = qla2x00_create_host(sht, ha);
	if (!vha) {
		ql_log(ql_log_warn, vha, 0xa005,
		    "scsi_host_alloc() failed for vport.\n");
		return NULL;
	}

	host = vha->host;
	fc_vport->dd_data = vha;
	/* New host info */
	u64_to_wwn(fc_vport->node_name, vha->node_name);
	u64_to_wwn(fc_vport->port_name, vha->port_name);

	vha->fc_vport = fc_vport;
	vha->device_flags = 0;
	vha->vp_idx = qla24xx_allocate_vp_id(vha);
	if (vha->vp_idx > ha->max_npiv_vports) {
		ql_dbg(ql_dbg_vport, vha, 0xa006,
		    "Couldn't allocate vp_id.\n");
		goto create_vhost_failed;
	}
	vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

	vha->dpc_flags = 0L;
	ha->dpc_active = 0;
	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);

	/*
	 * Prevent the vport from processing the parent's RSCNs before its
	 * own SCR has completed.
	 */
	set_bit(VP_SCR_NEEDED, &vha->vp_flags);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

	qla2x00_start_timer(vha, WATCH_INTERVAL);

	vha->req = base_vha->req;
	vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
	host->can_queue = base_vha->req->length + 128;
	host->cmd_per_lun = 3;
	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif)
		host->max_cmd_len = 32;
	else
		host->max_cmd_len = MAX_CMDSZ;
	host->max_channel = MAX_BUSES - 1;
	host->max_lun = ql2xmaxlun;
	host->unique_id = host->host_no;
	host->max_id = ha->max_fibre_devices;
	host->transportt = qla2xxx_transport_vport_template;

	ql_dbg(ql_dbg_vport, vha, 0xa007,
	    "Detect vport hba %ld at address = %p.\n",
	    vha->host_no, vha);

	vha->flags.init_done = 1;

	mutex_lock(&ha->vport_lock);
	set_bit(vha->vp_idx, ha->vp_idx_map);
	ha->cur_vport_count++;
	mutex_unlock(&ha->vport_lock);

	return vha;

create_vhost_failed:
	return NULL;
}

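/*
 * Free a request queue: release its DMA ring, drop it from the request
 * queue map and free the outstanding-commands array.
 */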
static void
qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = req->id;

	dma_free_coherent(&ha->pdev->dev, (req->length + 1) *
	    sizeof(request_t), req->ring, req->dma);
	req->ring = NULL;
	req->dma = 0;
	if (que_id) {
		ha->req_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->req_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(req->outstanding_cmds);
	kfree(req);
	req = NULL;
}

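/*
 * Free a response queue: release its MSI-X vector if one was registered,
 * free the DMA ring and drop it from the response queue map.
 */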
static void
qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	struct qla_hw_data *ha = vha->hw;
	uint16_t que_id = rsp->id;

	if (rsp->msix && rsp->msix->have_irq) {
		free_irq(rsp->msix->vector, rsp->msix->handle);
		rsp->msix->have_irq = 0;
		rsp->msix->in_use = 0;
		rsp->msix->handle = NULL;
	}
	dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) *
	    sizeof(response_t), rsp->ring, rsp->dma);
	rsp->ring = NULL;
	rsp->dma = 0;
	if (que_id) {
		ha->rsp_q_map[que_id] = NULL;
		mutex_lock(&ha->vport_lock);
		clear_bit(que_id, ha->rsp_qid_map);
		mutex_unlock(&ha->vport_lock);
	}
	kfree(rsp);
	rsp = NULL;
}

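/*
 * Tear down a request queue: re-initialize it with BIT_0 (delete) set in
 * its options, then free its resources.
 */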
int
qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
{
	int ret = QLA_SUCCESS;

	if (req && vha->flags.qpairs_req_created) {
		req->options |= BIT_0;
		ret = qla25xx_init_req_que(vha, req);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_req_que(vha, req);
	}

	return ret;
}

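/*
 * Tear down a response queue: re-initialize it with BIT_0 (delete) set in
 * its options, then free its resources.
 */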
int
qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
{
	int ret = QLA_SUCCESS;

	if (rsp && vha->flags.qpairs_rsp_created) {
		rsp->options |= BIT_0;
		ret = qla25xx_init_rsp_que(vha, rsp);
		if (ret != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;

		qla25xx_free_rsp_que(vha, rsp);
	}

	return ret;
}

/* Delete all queues for a given vhost */
int
qla25xx_delete_queues(struct scsi_qla_host *vha)
{
	int cnt, ret = 0;
	struct req_que *req = NULL;
	struct rsp_que *rsp = NULL;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair, *tqpair;

	if (ql2xmqsupport || ql2xnvmeenable) {
		list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
		    qp_list_elem)
			qla2xxx_delete_qpair(vha, qpair);
	} else {
		/* Delete request queues */
		for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
			req = ha->req_q_map[cnt];
			if (req && test_bit(cnt, ha->req_qid_map)) {
				ret = qla25xx_delete_req_que(vha, req);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00ea,
					    "Couldn't delete req que %d.\n",
					    req->id);
					return ret;
				}
			}
		}

		/* Delete response queues */
		for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
			rsp = ha->rsp_q_map[cnt];
			if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
				ret = qla25xx_delete_rsp_que(vha, rsp);
				if (ret != QLA_SUCCESS) {
					ql_log(ql_log_warn, vha, 0x00eb,
					    "Couldn't delete rsp que %d.\n",
					    rsp->id);
					return ret;
				}
			}
		}
	}

	return ret;
}

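/*
 * Create an additional request queue for multiqueue operation: allocate
 * the queue structure and DMA ring, claim a queue id from req_qid_map,
 * set up its registers and optionally initialize it in the firmware.
 * Returns the new queue id, or 0 on failure.
 */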
int
qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
    uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos, bool startqp)
{
	int ret = 0;
	struct req_que *req = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	uint16_t que_id = 0;
	device_reg_t *reg;
	uint32_t cnt;

	req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
	if (req == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00d9,
		    "Failed to allocate memory for request queue.\n");
		goto failed;
	}

	req->length = REQUEST_ENTRY_CNT_24XX;
	req->ring = dma_alloc_coherent(&ha->pdev->dev,
	    (req->length + 1) * sizeof(request_t),
	    &req->dma, GFP_KERNEL);
	if (req->ring == NULL) {
		ql_log(ql_log_fatal, base_vha, 0x00da,
		    "Failed to allocate memory for request_ring.\n");
		goto que_failed;
	}

	ret = qla2x00_alloc_outstanding_cmds(ha, req);
	if (ret != QLA_SUCCESS)
		goto que_failed;

	mutex_lock(&ha->mq_lock);
	que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
	if (que_id >= ha->max_req_queues) {
		mutex_unlock(&ha->mq_lock);
		ql_log(ql_log_warn, base_vha, 0x00db,
		    "No resources to create additional request queue.\n");
		goto que_failed;
	}
	set_bit(que_id, ha->req_qid_map);
	ha->req_q_map[que_id] = req;
	req->rid = rid;
	req->vp_idx = vp_idx;
	req->qos = qos;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	ql_dbg(ql_dbg_init, base_vha, 0x00dc,
	    "queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
	    que_id, req->rid, req->vp_idx, req->qos);
	if (rsp_que < 0)
		req->rsp = NULL;
	else
		req->rsp = ha->rsp_q_map[rsp_que];
	/* Use alternate PCI bus number */
	if (MSB(req->rid))
		options |= BIT_4;
	/* Use alternate PCI devfn */
	if (LSB(req->rid))
		options |= BIT_5;
	req->options = options;

	ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
	    "options=0x%x.\n", req->options);
	ql_dbg(ql_dbg_init, base_vha, 0x00dd,
	    "options=0x%x.\n", req->options);
	for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
		req->outstanding_cmds[cnt] = NULL;
	req->current_outstanding_cmd = 1;

	req->ring_ptr = req->ring;
	req->ring_index = 0;
	req->cnt = req->length;
	req->id = que_id;
	reg = ISP_QUE_REG(ha, que_id);
	req->req_q_in = &reg->isp25mq.req_q_in;
	req->req_q_out = &reg->isp25mq.req_q_out;
	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
	req->out_ptr = (uint16_t *)(req->ring + req->length);
	mutex_unlock(&ha->mq_lock);
	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index,
	    req->cnt, req->id, req->max_q_depth);
	ql_dbg(ql_dbg_init, base_vha, 0x00de,
	    "ring_ptr=%p ring_index=%d, "
	    "cnt=%d id=%d max_q_depth=%d.\n",
	    req->ring_ptr, req->ring_index, req->cnt,
	    req->id, req->max_q_depth);

	if (startqp) {
		ret = qla25xx_init_req_que(base_vha, req);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_fatal, base_vha, 0x00df,
			    "%s failed.\n", __func__);
			mutex_lock(&ha->mq_lock);
			clear_bit(que_id, ha->req_qid_map);
			mutex_unlock(&ha->mq_lock);
			goto que_failed;
		}
		vha->flags.qpairs_req_created = 1;
	}

	return req->id;

que_failed:
	qla25xx_free_req_que(base_vha, req);
failed:
	return 0;
}

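/* Work item that drains a qpair's response queue outside of IRQ context. */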
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) static void qla_do_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct scsi_qla_host *vha = qpair->vha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) spin_lock_irqsave(&qpair->qp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) qla24xx_process_response_queue(vha, qpair->rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) spin_unlock_irqrestore(&qpair->qp_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
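/*
 * Note (editorial, hedged): qla_do_work() drains a qpair's response queue in
 * process context while holding qp_lock. INIT_WORK() for it is done in
 * qla25xx_create_rsp_que() below when ha->wq exists; the code that actually
 * schedules it (presumably queue_work() on ha->wq from the interrupt path)
 * lives outside this file and is assumed here.
 */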
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /* create response queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) uint8_t vp_idx, uint16_t rid, struct qla_qpair *qpair, bool startqp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct rsp_que *rsp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) uint16_t que_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) device_reg_t *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (rsp == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) ql_log(ql_log_warn, base_vha, 0x0066,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) "Failed to allocate memory for response queue.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) rsp->length = RESPONSE_ENTRY_CNT_MQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) (rsp->length + 1) * sizeof(response_t),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) &rsp->dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) if (rsp->ring == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ql_log(ql_log_warn, base_vha, 0x00e1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) "Failed to allocate memory for response ring.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) goto que_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) mutex_lock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) if (que_id >= ha->max_rsp_queues) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) mutex_unlock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ql_log(ql_log_warn, base_vha, 0x00e2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) "No resources to create additional response queue.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) goto que_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) set_bit(que_id, ha->rsp_qid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) rsp->msix = qpair->msix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) ha->rsp_q_map[que_id] = rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) rsp->rid = rid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) rsp->vp_idx = vp_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) rsp->hw = ha;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) ql_dbg(ql_dbg_init, base_vha, 0x00e4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) que_id, rsp->rid, rsp->vp_idx, rsp->hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) /* Use alternate PCI bus number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) if (MSB(rsp->rid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) options |= BIT_4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /* Use alternate PCI devfn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) if (LSB(rsp->rid))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) options |= BIT_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* Enable MSIX handshake mode on adapters that are not MSIX-NACK capable */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) if (!IS_MSIX_NACK_CAPABLE(ha))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) options |= BIT_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) /* Set option to indicate response queue creation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) options |= BIT_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) rsp->options = options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) rsp->id = que_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) reg = ISP_QUE_REG(ha, que_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) mutex_unlock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) rsp->options, rsp->id, rsp->rsp_q_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) rsp->rsp_q_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) ql_dbg(ql_dbg_init, base_vha, 0x00e5,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) rsp->options, rsp->id, rsp->rsp_q_in,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) rsp->rsp_q_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) ret = qla25xx_request_irq(ha, qpair, qpair->msix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) ha->flags.disable_msix_handshake ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) goto que_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (startqp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ret = qla25xx_init_rsp_que(base_vha, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) if (ret != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) ql_log(ql_log_fatal, base_vha, 0x00e7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) "%s failed.\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) mutex_lock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) clear_bit(que_id, ha->rsp_qid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) mutex_unlock(&ha->mq_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) goto que_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) vha->flags.qpairs_rsp_created = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) rsp->req = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) qla2x00_init_response_q_entries(rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (qpair->hw->wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) INIT_WORK(&qpair->q_work, qla_do_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) return rsp->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) que_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) qla25xx_free_rsp_que(base_vha, rsp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
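/*
 * Illustrative sketch, not part of the original driver: how a qpair setup
 * path might use qla25xx_create_rsp_que() above. The helper name and the
 * error value are invented for illustration; the real caller also creates
 * the paired request queue and handles rollback, which is omitted here.
 */
static int __maybe_unused
qla_example_add_rsp_que(struct qla_hw_data *ha, struct qla_qpair *qpair,
	uint8_t vp_idx, bool startqp)
{
	int rsp_id;

	/* options/rid of 0: defaults, no alternate PCI bus/devfn. */
	rsp_id = qla25xx_create_rsp_que(ha, 0, vp_idx, 0, qpair, startqp);
	if (!rsp_id)
		return -ENOMEM;

	/* The returned id indexes ha->rsp_q_map[] (see assignment above). */
	qpair->rsp = ha->rsp_q_map[rsp_id];
	return 0;
}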
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static void qla_ctrlvp_sp_done(srb_t *sp, int res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) if (sp->comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) complete(sp->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) /* don't free sp here. Let the caller do the free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * qla24xx_control_vp() - Enable or disable a virtual port on the given host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * @vha: adapter block pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * @cmd: VP control command type to send for the virtual port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * Return: qla2xxx local function return status code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int rval = QLA_MEMORY_ALLOC_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) struct qla_hw_data *ha = vha->hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) int vp_index = vha->vp_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) DECLARE_COMPLETION_ONSTACK(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) srb_t *sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) ql_dbg(ql_dbg_vport, vha, 0x10c1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) return QLA_PARAMETER_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) if (!sp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) sp->type = SRB_CTRL_VP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) sp->name = "ctrl_vp";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) sp->comp = &comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) sp->done = qla_ctrlvp_sp_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) rval = qla2x00_start_sp(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (rval != QLA_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ql_dbg(ql_dbg_async, vha, 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) "%s: %s Failed submission. %x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) __func__, sp->name, rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) sp->name, sp->handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) wait_for_completion(&comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) sp->comp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) rval = sp->rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) switch (rval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) case QLA_FUNCTION_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) __func__, sp->name, rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) case QLA_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) __func__, sp->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) __func__, sp->name, rval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) sp->free(sp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
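/*
 * Illustrative sketch, not part of the original driver: issuing the VP
 * control IOCB documented above. The wrapper name is invented for
 * illustration, and VCE_COMMAND_ENABLE_VPS is assumed to be the enable
 * command defined in qla_def.h as used by the driver's enable-VP path.
 */
static int __maybe_unused
qla_example_enable_vport(scsi_qla_host_t *vha)
{
	int ret;

	/* Blocks until the SRB completes, times out, or fails to submit. */
	ret = qla24xx_control_vp(vha, VCE_COMMAND_ENABLE_VPS);
	if (ret != QLA_SUCCESS)
		ql_dbg(ql_dbg_vport, vha, 0xffff,
		    "Enable VP failed, ret=%x.\n", ret);
	return ret;
}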