// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * ibmvfc.c -- driver for IBM Power Virtual Fibre Channel Adapter
 *
 * Written By: Brian King <brking@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) IBM Corporation, 2008
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/stringify.h>
#include <linux/bsg-lib.h>
#include <asm/firmware.h>
#include <asm/irq.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include "ibmvfc.h"

static unsigned int init_timeout = IBMVFC_INIT_TIMEOUT;
static unsigned int default_timeout = IBMVFC_DEFAULT_TIMEOUT;
static u64 max_lun = IBMVFC_MAX_LUN;
static unsigned int max_targets = IBMVFC_MAX_TARGETS;
static unsigned int max_requests = IBMVFC_MAX_REQUESTS_DEFAULT;
static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;

MODULE_DESCRIPTION("IBM Virtual Fibre Channel Driver");
MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);

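/*
 * Module parameters: init_timeout, default_timeout, and debug are
 * writable through sysfs at runtime (S_IWUSR); max_requests, max_lun,
 * max_targets, and disc_threads are visible but read-only (S_IRUGO);
 * log_level and cls3_error (permission 0) can only be set at module
 * load time.
 */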
module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
		 "[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
module_param_named(default_timeout, default_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(default_timeout,
		 "Default timeout in seconds for initialization and EH commands. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_TIMEOUT) "]");
module_param_named(max_requests, max_requests, uint, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter. "
		 "[Default=" __stringify(IBMVFC_MAX_REQUESTS_DEFAULT) "]");
module_param_named(max_lun, max_lun, ullong, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Maximum allowed LUN. "
		 "[Default=" __stringify(IBMVFC_MAX_LUN) "]");
module_param_named(max_targets, max_targets, uint, S_IRUGO);
MODULE_PARM_DESC(max_targets, "Maximum allowed targets. "
		 "[Default=" __stringify(IBMVFC_MAX_TARGETS) "]");
module_param_named(disc_threads, disc_threads, uint, S_IRUGO);
MODULE_PARM_DESC(disc_threads, "Number of device discovery threads to use. "
		 "[Default=" __stringify(IBMVFC_MAX_DISC_THREADS) "]");
module_param_named(debug, ibmvfc_debug, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable driver debug information. "
		 "[Default=" __stringify(IBMVFC_DEBUG) "]");
module_param_named(log_level, log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
		 "[Default=" __stringify(IBMVFC_DEFAULT_LOG_LEVEL) "]");
module_param_named(cls3_error, cls3_error, uint, 0);
MODULE_PARM_DESC(cls3_error, "Enable FC Class 3 Error Recovery. "
		 "[Default=" __stringify(IBMVFC_CLS3_ERROR) "]");

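/*
 * cmd_status maps a (status class, error) pair reported for a command to
 * the SCSI midlayer result to return, whether the command is worth
 * retrying, whether the failure should be logged, and a human-readable
 * description. ibmvfc_get_err_index() below performs the lookup;
 * unrecognized pairs fall back to "unknown error", DID_ERROR, and retry.
 */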
static const struct {
	u16 status;
	u16 error;
	u8 result;
	u8 retry;
	int log;
	char *name;
} cmd_status [] = {
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_ESTABLISH, DID_ERROR, 1, 1, "unable to establish" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_FAULT, DID_OK, 1, 0, "transport fault" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CMD_TIMEOUT, DID_TIME_OUT, 1, 1, "command timeout" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_ENETDOWN, DID_TRANSPORT_DISRUPTED, 1, 1, "network down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_HW_FAILURE, DID_ERROR, 1, 1, "hardware failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DOWN_ERR, DID_REQUEUE, 0, 0, "link down" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_DEAD_ERR, DID_ERROR, 0, 0, "link dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_UNABLE_TO_REGISTER, DID_ERROR, 1, 1, "unable to register" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_BUSY, DID_BUS_BUSY, 1, 0, "transport busy" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
	{ IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },

	{ IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_SW_FAILURE, DID_ERROR, 0, 1, "software failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INVALID_PARAMETER, DID_ERROR, 0, 1, "invalid parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_MISSING_PARAMETER, DID_ERROR, 0, 1, "missing parameter" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_HOST_IO_BUS, DID_ERROR, 1, 1, "host I/O bus failure" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED, DID_ERROR, 0, 1, "transaction cancelled" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_TRANS_CANCELLED_IMPLICIT, DID_ERROR, 0, 1, "transaction cancelled implicit" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_INSUFFICIENT_RESOURCE, DID_REQUEUE, 1, 1, "insufficient resources" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_PLOGI_REQUIRED, DID_ERROR, 0, 1, "port login required" },
	{ IBMVFC_VIOS_FAILURE, IBMVFC_COMMAND_FAILED, DID_ERROR, 1, 1, "command failed" },

	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_ELS_CMD_CODE, DID_ERROR, 0, 1, "invalid ELS command code" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_VERSION, DID_ERROR, 0, 1, "invalid version level" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_ERROR, DID_ERROR, 1, 1, "logical error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_INVALID_CT_IU_SIZE, DID_ERROR, 0, 1, "invalid CT_IU size" },
	{ IBMVFC_FC_FAILURE, IBMVFC_LOGICAL_BUSY, DID_REQUEUE, 1, 0, "logical busy" },
	{ IBMVFC_FC_FAILURE, IBMVFC_PROTOCOL_ERROR, DID_ERROR, 1, 1, "protocol error" },
	{ IBMVFC_FC_FAILURE, IBMVFC_UNABLE_TO_PERFORM_REQ, DID_ERROR, 1, 1, "unable to perform request" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_NOT_SUPPORTED, DID_ERROR, 0, 0, "command not supported" },
	{ IBMVFC_FC_FAILURE, IBMVFC_SERVER_NOT_AVAIL, DID_ERROR, 0, 1, "server not available" },
	{ IBMVFC_FC_FAILURE, IBMVFC_CMD_IN_PROGRESS, DID_ERROR, 0, 1, "command already in progress" },
	{ IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },

	{ IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
	{ IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
};

static void ibmvfc_npiv_login(struct ibmvfc_host *);
static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

static const char *unknown_error = "unknown error";

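/*
 * Driver tracing is compile-time optional: when CONFIG_SCSI_IBMVFC_TRACE
 * is not set, ibmvfc_trc_start() and ibmvfc_trc_end() below are defined
 * away to empty statements, so untraced builds pay no cost.
 */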
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
 * ibmvfc_trc_start - Log a start trace entry
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_trc_start(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->iu.cmd;
	struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
	struct ibmvfc_trace_entry *entry;

	entry = &vhost->trace[vhost->trace_index++];
	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_START;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = vfc_cmd->iu.cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
		entry->tmf_flags = vfc_cmd->iu.tmf_flags;
		entry->u.start.xfer_len = be32_to_cpu(vfc_cmd->iu.xfer_len);
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		break;
	default:
		break;
	}
}

/**
 * ibmvfc_trc_end - Log an end trace entry
 * @evt:	ibmvfc event struct
 *
 **/
static void ibmvfc_trc_end(struct ibmvfc_event *evt)
{
	struct ibmvfc_host *vhost = evt->vhost;
	struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
	struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
	struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];

	entry->evt = evt;
	entry->time = jiffies;
	entry->fmt = evt->crq.format;
	entry->type = IBMVFC_TRC_END;

	switch (entry->fmt) {
	case IBMVFC_CMD_FORMAT:
		entry->op_code = vfc_cmd->iu.cdb[0];
		entry->scsi_id = be64_to_cpu(vfc_cmd->tgt_scsi_id);
		entry->lun = scsilun_to_int(&vfc_cmd->iu.lun);
		entry->tmf_flags = vfc_cmd->iu.tmf_flags;
		entry->u.end.status = be16_to_cpu(vfc_cmd->status);
		entry->u.end.error = be16_to_cpu(vfc_cmd->error);
		entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
		entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
		entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
		break;
	case IBMVFC_MAD_FORMAT:
		entry->op_code = be32_to_cpu(mad->opcode);
		entry->u.end.status = be16_to_cpu(mad->status);
		break;
	default:
		break;
	}
}

#else
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
#endif

/**
 * ibmvfc_get_err_index - Find the index into cmd_status for the fcp response
 * @status:	status / error class
 * @error:	error
 *
 * Return value:
 *	index into cmd_status / -EINVAL on failure
 **/
static int ibmvfc_get_err_index(u16 status, u16 error)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cmd_status); i++)
		if ((cmd_status[i].status & status) == cmd_status[i].status &&
		    cmd_status[i].error == error)
			return i;

	return -EINVAL;
}

/**
 * ibmvfc_get_cmd_error - Find the error description for the fcp response
 * @status:	status / error class
 * @error:	error
 *
 * Return value:
 *	error description string
 **/
static const char *ibmvfc_get_cmd_error(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);
	if (rc >= 0)
		return cmd_status[rc].name;
	return unknown_error;
}

/**
 * ibmvfc_get_err_result - Find the scsi status to return for the fcp response
 * @vfc_cmd:	ibmvfc command struct
 *
 * Return value:
 *	SCSI result value to return for completed command
 **/
static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
{
	int err;
	struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
	int fc_rsp_len = be32_to_cpu(rsp->fcp_rsp_len);

	if ((rsp->flags & FCP_RSP_LEN_VALID) &&
	    ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
	     rsp->data.info.rsp_code))
		return DID_ERROR << 16;

	err = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
	if (err >= 0)
		return rsp->scsi_status | (cmd_status[err].result << 16);
	return rsp->scsi_status | (DID_ERROR << 16);
}

/**
 * ibmvfc_retry_cmd - Determine if error status is retryable
 * @status:	status / error class
 * @error:	error
 *
 * Return value:
 *	1 if error should be retried / 0 if it should not
 **/
static int ibmvfc_retry_cmd(u16 status, u16 error)
{
	int rc = ibmvfc_get_err_index(status, error);

	if (rc >= 0)
		return cmd_status[rc].retry;
	return 1;
}

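/*
 * The ls_explain and gs_explain tables below translate reject
 * "explanation" codes into text for logging: ls_explain for extended
 * link service (ELS) rejects and gs_explain for generic/name server
 * rejects. The values are assumed to follow the standard FC-LS LS_RJT
 * and FC-GS CT reject explanation codes; anything unlisted is reported
 * as "unknown fc explain".
 */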
static const char *unknown_fc_explain = "unknown fc explain";

static const struct {
	u16 fc_explain;
	char *name;
} ls_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "service parameter error - options" },
	{ 0x03, "service parameter error - initiator control" },
	{ 0x05, "service parameter error - recipient control" },
	{ 0x07, "service parameter error - received data field size" },
	{ 0x09, "service parameter error - concurrent seq" },
	{ 0x0B, "service parameter error - credit" },
	{ 0x0D, "invalid N_Port/F_Port_Name" },
	{ 0x0E, "invalid node/Fabric Name" },
	{ 0x0F, "invalid common service parameters" },
	{ 0x11, "invalid association header" },
	{ 0x13, "association header required" },
	{ 0x15, "invalid originator S_ID" },
	{ 0x17, "invalid OX_ID-RX-ID combination" },
	{ 0x19, "command (request) already in progress" },
	{ 0x1E, "N_Port Login requested" },
	{ 0x1F, "Invalid N_Port_ID" },
};

static const struct {
	u16 fc_explain;
	char *name;
} gs_explain [] = {
	{ 0x00, "no additional explanation" },
	{ 0x01, "port identifier not registered" },
	{ 0x02, "port name not registered" },
	{ 0x03, "node name not registered" },
	{ 0x04, "class of service not registered" },
	{ 0x06, "initial process associator not registered" },
	{ 0x07, "FC-4 TYPEs not registered" },
	{ 0x08, "symbolic port name not registered" },
	{ 0x09, "symbolic node name not registered" },
	{ 0x0A, "port type not registered" },
	{ 0xF0, "authorization exception" },
	{ 0xF1, "authentication exception" },
	{ 0xF2, "data base full" },
	{ 0xF3, "data base empty" },
	{ 0xF4, "processing request" },
	{ 0xF5, "unable to verify connection" },
	{ 0xF6, "devices not in a common zone" },
};

/**
 * ibmvfc_get_ls_explain - Return the FC Explain description text
 * @status:	FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_ls_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ls_explain); i++)
		if (ls_explain[i].fc_explain == status)
			return ls_explain[i].name;

	return unknown_fc_explain;
}

/**
 * ibmvfc_get_gs_explain - Return the FC Explain description text
 * @status:	FC Explain status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_gs_explain(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(gs_explain); i++)
		if (gs_explain[i].fc_explain == status)
			return gs_explain[i].name;

	return unknown_fc_explain;
}

static const struct {
	enum ibmvfc_fc_type fc_type;
	char *name;
} fc_type [] = {
	{ IBMVFC_FABRIC_REJECT, "fabric reject" },
	{ IBMVFC_PORT_REJECT, "port reject" },
	{ IBMVFC_LS_REJECT, "ELS reject" },
	{ IBMVFC_FABRIC_BUSY, "fabric busy" },
	{ IBMVFC_PORT_BUSY, "port busy" },
	{ IBMVFC_BASIC_REJECT, "basic reject" },
};

static const char *unknown_fc_type = "unknown fc type";

/**
 * ibmvfc_get_fc_type - Return the FC Type description text
 * @status:	FC Type error status
 *
 * Returns:
 *	error string
 **/
static const char *ibmvfc_get_fc_type(u16 status)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fc_type); i++)
		if (fc_type[i].fc_type == status)
			return fc_type[i].name;

	return unknown_fc_type;
}

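/*
 * Target actions form a mostly one-way state machine. From the "normal"
 * states (the default case below) any new action is accepted, but once a
 * target is on the logout/delete path only the specific forward
 * transitions handled below are honored and everything else returns
 * -EINVAL, so a late completion cannot move a dying target backwards.
 * Any action at or beyond LOGOUT_RPORT also clears add_rport, which
 * presumably keeps the rport from being added back to the FC transport
 * elsewhere in the driver.
 */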
/**
 * ibmvfc_set_tgt_action - Set the next init action for the target
 * @tgt:	ibmvfc target struct
 * @action:	action to perform
 *
 * Returns:
 *	0 if action changed / non-zero if not changed
 **/
static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
				 enum ibmvfc_target_action action)
{
	int rc = -EINVAL;

	switch (tgt->action) {
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT ||
		    action == IBMVFC_TGT_ACTION_DEL_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
		if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
		    action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
		if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DEL_RPORT:
		if (action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
			tgt->action = action;
			rc = 0;
		}
		break;
	case IBMVFC_TGT_ACTION_DELETED_RPORT:
		break;
	default:
		tgt->action = action;
		rc = 0;
		break;
	}

	if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
		tgt->add_rport = 0;

	return rc;
}

/**
 * ibmvfc_set_host_state - Set the state for the host
 * @vhost:	ibmvfc host struct
 * @state:	state to set host to
 *
 * Returns:
 *	0 if state changed / non-zero if not changed
 **/
static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
				 enum ibmvfc_host_state state)
{
	int rc = 0;

	switch (vhost->state) {
	case IBMVFC_HOST_OFFLINE:
		rc = -EINVAL;
		break;
	default:
		vhost->state = state;
		break;
	}

	return rc;
}

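/*
 * Host actions are similarly filtered rather than blindly assigned: the
 * *_WAIT states can only be entered from their matching active state,
 * QUERY is only accepted from INIT_WAIT, NONE, or TGT_DEL_FAILED, and a
 * pending RESET or REENABLE is never overwritten by a lower-priority
 * action.
 */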
/**
 * ibmvfc_set_host_action - Set the next init action for the host
 * @vhost:	ibmvfc host struct
 * @action:	action to perform
 *
 **/
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
				   enum ibmvfc_host_action action)
{
	switch (action) {
	case IBMVFC_HOST_ACTION_ALLOC_TGTS:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_LOGO_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT_WAIT:
		if (vhost->action == IBMVFC_HOST_ACTION_INIT)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_QUERY:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_INIT_WAIT:
		case IBMVFC_HOST_ACTION_NONE:
		case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
			vhost->action = action;
			break;
		default:
			break;
		}
		break;
	case IBMVFC_HOST_ACTION_TGT_INIT:
		if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
			vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_REENABLE:
	case IBMVFC_HOST_ACTION_RESET:
		vhost->action = action;
		break;
	case IBMVFC_HOST_ACTION_INIT:
	case IBMVFC_HOST_ACTION_TGT_DEL:
	case IBMVFC_HOST_ACTION_LOGO:
	case IBMVFC_HOST_ACTION_QUERY_TGTS:
	case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
	case IBMVFC_HOST_ACTION_NONE:
	default:
		switch (vhost->action) {
		case IBMVFC_HOST_ACTION_RESET:
		case IBMVFC_HOST_ACTION_REENABLE:
			break;
		default:
			vhost->action = action;
			break;
		}
		break;
	}
}

/**
 * ibmvfc_reinit_host - Re-start host initialization (no NPIV Login)
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
	if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
	    vhost->state == IBMVFC_ACTIVE) {
		if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
			scsi_block_requests(vhost->host);
			ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
		}
	} else
		vhost->reinit = 1;

	wake_up(&vhost->work_wait_q);
}

/**
 * ibmvfc_del_tgt - Schedule cleanup and removal of the target
 * @tgt:	ibmvfc target struct
 *
 **/
static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
{
	if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
		tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
	wake_up(&tgt->vhost->work_wait_q);
}

/**
 * ibmvfc_link_down - Handle a link down event from the adapter
 * @vhost:	ibmvfc host struct
 * @state:	ibmvfc host state to enter
 *
 **/
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
			     enum ibmvfc_host_state state)
{
	struct ibmvfc_target *tgt;

	ENTER;
	scsi_block_requests(vhost->host);
	list_for_each_entry(tgt, &vhost->targets, queue)
		ibmvfc_del_tgt(tgt);
	ibmvfc_set_host_state(vhost, state);
	ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
	vhost->events_to_log |= IBMVFC_AE_LINKDOWN;
	wake_up(&vhost->work_wait_q);
	LEAVE;
}

/**
 * ibmvfc_init_host - Start host initialization
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	nothing
 **/
static void ibmvfc_init_host(struct ibmvfc_host *vhost)
{
	struct ibmvfc_target *tgt;

	if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
		if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
			dev_err(vhost->dev,
				"Host initialization retries exceeded. Taking adapter offline\n");
			ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
			return;
		}
	}

	if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
		memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
		vhost->async_crq.cur = 0;

		list_for_each_entry(tgt, &vhost->targets, queue)
			ibmvfc_del_tgt(tgt);
		scsi_block_requests(vhost->host);
		ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
		vhost->job_step = ibmvfc_npiv_login;
		wake_up(&vhost->work_wait_q);
	}
}

/**
 * ibmvfc_send_crq - Send a CRQ
 * @vhost:	ibmvfc host struct
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

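/*
 * CRQ initialization handshake. The constants used below appear to be
 * the usual PAPR CRQ control messages: format byte 0xC0 with 0x01 in the
 * next byte requests initialization, 0x02 signals initialization
 * complete. Only the raw constants are defined in this file, so treat
 * this description as an interpretation rather than a specification.
 */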
/**
 * ibmvfc_send_crq_init - Send a CRQ init message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init\n");
	return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
}

/**
 * ibmvfc_send_crq_init_complete - Send a CRQ init complete message
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
{
	ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
	return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
}

/**
 * ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
 * @vhost:	ibmvfc host struct
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 **/
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	ibmvfc_dbg(vhost, "Releasing CRQ\n");
	free_irq(vdev->irq, vhost);
	tasklet_kill(&vhost->tasklet);
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;
	dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)crq->msgs);
}

/**
 * ibmvfc_reenable_crq_queue - reenables the CRQ
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);

	/* Re-enable the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));

	if (rc)
		dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);

	return rc;
}

/**
 * ibmvfc_reset_crq - resets a crq after a failure
 * @vhost:	ibmvfc host struct
 *
 * Return value:
 *	0 on success / other on failure
 **/
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
{
	int rc = 0;
	unsigned long flags;
	struct vio_dev *vdev = to_vio_dev(vhost->dev);
	struct ibmvfc_crq_queue *crq = &vhost->crq;

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	spin_lock_irqsave(vhost->host->host_lock, flags);
	vhost->state = IBMVFC_NO_CRQ;
	vhost->logged_in = 0;

	/* Clean out the queue */
	memset(crq->msgs, 0, PAGE_SIZE);
	crq->cur = 0;

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
				crq->msg_token, PAGE_SIZE);

	if (rc == H_CLOSED)
		/* Adapter is good, but other end is not ready */
		dev_warn(vhost->dev, "Partner adapter not ready\n");
	else if (rc != 0)
		dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	return rc;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) * ibmvfc_valid_event - Determines if event is valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * @pool: event_pool that contains the event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) * @evt: ibmvfc event to be checked for validity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * 1 if event is valid / 0 if event is not valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) int index = evt - pool->events;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) if (index < 0 || index >= pool->size) /* outside of bounds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) if (evt != pool->events + index) /* unaligned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * ibmvfc_free_event - Free the specified event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * @evt: ibmvfc_event to be freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) static void ibmvfc_free_event(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct ibmvfc_event_pool *pool = &vhost->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) BUG_ON(!ibmvfc_valid_event(pool, evt));
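	/*
	 * evt->free must transition from 0 to 1 here; any other result means
	 * the event was already on the free list, i.e. a double free.
	 */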
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) BUG_ON(atomic_inc_return(&evt->free) != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) list_add_tail(&evt->queue, &vhost->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * ibmvfc_scsi_eh_done - EH done function for queuecommand commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * This function does not set up any error status; that must be done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * before this function gets called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) struct scsi_cmnd *cmnd = evt->cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) if (cmnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) scsi_dma_unmap(cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) cmnd->scsi_done(cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (evt->eh_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) complete(evt->eh_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) * ibmvfc_fail_request - Fail request with specified error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) * @error_code: error code to fail request with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) if (evt->cmnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) evt->cmnd->result = (error_code << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) evt->done = ibmvfc_scsi_eh_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) list_del(&evt->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) del_timer(&evt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) ibmvfc_trc_end(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) evt->done(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * ibmvfc_purge_requests - Our virtual adapter just shut down. Purge any sent requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * @error_code: error code to fail requests with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) struct ibmvfc_event *evt, *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) ibmvfc_dbg(vhost, "Purging all requests\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) ibmvfc_fail_request(evt, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * @vhost: struct ibmvfc host to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ibmvfc_purge_requests(vhost, DID_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) * __ibmvfc_reset_host - Reset the connection to the server (no locking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) * @vhost: struct ibmvfc host to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) {
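	/*
	 * If we are logged in and not already in the middle of a logout,
	 * prefer a clean NPIV logout; otherwise fall back to a hard reset
	 * that breaks the CRQ.
	 */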
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) scsi_block_requests(vhost->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) vhost->job_step = ibmvfc_npiv_logout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) ibmvfc_hard_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * ibmvfc_reset_host - Reset the connection to the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) __ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) * ibmvfc_retry_host_init - Retry host initialization if allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * Returns: 1 if init will be retried / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) int retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) vhost->delay_init = 1;
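		/*
		 * Once the retry limit is exceeded, take the adapter offline.
		 * On the final allowed retry, escalate to a full host reset;
		 * otherwise simply schedule another init attempt.
		 */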
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) dev_err(vhost->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) "Host initialization retries exceeded. Taking adapter offline\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) __ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) * __ibmvfc_get_target - Find the specified scsi_target (no locking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * @starget: scsi target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) * ibmvfc_target struct / NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) static struct ibmvfc_target *__ibmvfc_get_target(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) list_for_each_entry(tgt, &vhost->targets, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) if (tgt->target_id == starget->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) kref_get(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) return tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * ibmvfc_get_target - Find the specified scsi_target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) * @starget: scsi target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) * ibmvfc_target struct / NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) static struct ibmvfc_target *ibmvfc_get_target(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) tgt = __ibmvfc_get_target(starget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) return tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) * ibmvfc_get_host_speed - Get host port speed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) * @shost: scsi host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) static void ibmvfc_get_host_speed(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (vhost->state == IBMVFC_ACTIVE) {
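		/* The login response reports link_speed such that dividing by 100 gives the speed in gigabits */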
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) switch (be64_to_cpu(vhost->login_buf->resp.link_speed) / 100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) case 4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) case 8:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) case 10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) case 16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) fc_host_speed(shost) = FC_PORTSPEED_16GBIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) be64_to_cpu(vhost->login_buf->resp.link_speed) / 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * ibmvfc_get_host_port_state - Get host port state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * @shost: scsi host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) switch (vhost->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) case IBMVFC_INITIALIZING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) case IBMVFC_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) case IBMVFC_LINK_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) case IBMVFC_LINK_DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) case IBMVFC_HOST_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) case IBMVFC_HALTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) fc_host_port_state(shost) = FC_PORTSTATE_BLOCKED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) case IBMVFC_NO_CRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) ibmvfc_log(vhost, 3, "Unknown port state: %d\n", vhost->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * ibmvfc_set_rport_dev_loss_tmo - Set rport's device loss timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * @rport: rport struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * @timeout: timeout value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) {
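	/* Clamp to a 1 second minimum rather than accepting a zero timeout */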
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) if (timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) rport->dev_loss_tmo = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) rport->dev_loss_tmo = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * ibmvfc_release_tgt - Free memory allocated for a target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) * @kref: kref struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static void ibmvfc_release_tgt(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) struct ibmvfc_target *tgt = container_of(kref, struct ibmvfc_target, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) kfree(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * ibmvfc_get_starget_node_name - Get SCSI target's node name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * @starget: scsi target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) fc_starget_node_name(starget) = tgt ? tgt->ids.node_name : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) if (tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * ibmvfc_get_starget_port_name - Get SCSI target's port name
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * @starget: scsi target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) fc_starget_port_name(starget) = tgt ? tgt->ids.port_name : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) if (tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * ibmvfc_get_starget_port_id - Get SCSI target's port ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) * @starget: scsi target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) struct ibmvfc_target *tgt = ibmvfc_get_target(starget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) fc_starget_port_id(starget) = tgt ? tgt->scsi_id : -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) if (tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * ibmvfc_wait_while_resetting - Wait while the host resets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) long timeout = wait_event_timeout(vhost->init_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) ((vhost->state == IBMVFC_ACTIVE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) vhost->state == IBMVFC_HOST_OFFLINE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) vhost->state == IBMVFC_LINK_DEAD) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) vhost->action == IBMVFC_HOST_ACTION_NONE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) (init_timeout * HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) return timeout ? 0 : -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * ibmvfc_issue_fc_host_lip - Re-initiate link initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * @shost: scsi host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) return ibmvfc_wait_while_resetting(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * ibmvfc_gather_partition_info - Gather info about the LPAR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) *
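 * @vhost: ibmvfc host struct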
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) struct device_node *rootdn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) const unsigned int *num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) rootdn = of_find_node_by_path("/");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (!rootdn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) name = of_get_property(rootdn, "ibm,partition-name", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) strncpy(vhost->partition_name, name, sizeof(vhost->partition_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) num = of_get_property(rootdn, "ibm,partition-no", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) if (num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) vhost->partition_number = *num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) of_node_put(rootdn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) * ibmvfc_set_login_info - Setup info for NPIV login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) struct ibmvfc_npiv_login *login_info = &vhost->login_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct device_node *of_node = vhost->dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) const char *location;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) memset(login_info, 0, sizeof(*login_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) login_info->ostype = cpu_to_be32(IBMVFC_OS_LINUX);
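	/* max_dma_len is in bytes; IBMVFC_MAX_SECTORS is in 512-byte sectors, hence the shift by 9 */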
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) login_info->max_dma_len = cpu_to_be64(IBMVFC_MAX_SECTORS << 9);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) login_info->max_payload = cpu_to_be32(sizeof(struct ibmvfc_fcp_cmd_iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) login_info->max_response = cpu_to_be32(sizeof(struct ibmvfc_fcp_rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) login_info->partition_num = cpu_to_be32(vhost->partition_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) login_info->vfc_frame_version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) login_info->fcp_version = cpu_to_be16(3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) login_info->flags = cpu_to_be16(IBMVFC_FLUSH_ON_HALT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) if (vhost->client_migrated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) login_info->flags |= cpu_to_be16(IBMVFC_CLIENT_MIGRATED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) strncpy(login_info->device_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) location = of_get_property(of_node, "ibm,loc-code", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) location = location ? location : dev_name(vhost->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) strncpy(login_info->drc_name, location, IBMVFC_MAX_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) * @vhost: ibmvfc host who owns the event pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) * Returns zero on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) struct ibmvfc_event_pool *pool = &vhost->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) if (!pool->events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) pool->iu_storage = dma_alloc_coherent(vhost->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) pool->size * sizeof(*pool->iu_storage),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) &pool->iu_token, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (!pool->iu_storage) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) kfree(pool->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) for (i = 0; i < pool->size; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) struct ibmvfc_event *evt = &pool->events[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) atomic_set(&evt->free, 1);
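		/*
		 * 0x80 flags the CRQ entry as a valid command element; ioba is
		 * the DMA address of this event's slot in the coherent IU storage.
		 */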
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) evt->crq.valid = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) evt->xfer_iu = pool->iu_storage + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) evt->vhost = vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) evt->ext_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) list_add_tail(&evt->queue, &vhost->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * ibmvfc_free_event_pool - Frees memory of the event pool of a host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * @vhost: ibmvfc host who owns the event pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) struct ibmvfc_event_pool *pool = &vhost->pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) for (i = 0; i < pool->size; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) list_del(&pool->events[i].queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) BUG_ON(atomic_read(&pool->events[i].free) != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) if (pool->events[i].ext_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) dma_pool_free(vhost->sg_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) pool->events[i].ext_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) pool->events[i].ext_list_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) kfree(pool->events);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) dma_free_coherent(vhost->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) pool->size * sizeof(*pool->iu_storage),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) pool->iu_storage, pool->iu_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * ibmvfc_get_event - Gets the next free event in pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * Returns a free event from the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319)
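	/* The pool is sized for every request the host can accept, so the free list should never be empty here */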
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) BUG_ON(list_empty(&vhost->free));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) atomic_set(&evt->free, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) list_del(&evt->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) return evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) * ibmvfc_init_event - Initialize fields in an event struct that are always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) * required.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) * @evt: The event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) * @done: Routine to call when the event is responded to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) * @format: SRP or MAD format
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) static void ibmvfc_init_event(struct ibmvfc_event *evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) void (*done) (struct ibmvfc_event *), u8 format)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) evt->cmnd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) evt->sync_iu = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) evt->crq.format = format;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) evt->done = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) evt->eh_comp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) * ibmvfc_map_sg_list - Initialize scatterlist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) * @scmd: scsi command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) * @nseg: number of scatterlist segments
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) * @md: memory descriptor list to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct srp_direct_buf *md)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) scsi_for_each_sg(scmd, sg, nseg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) md[i].va = cpu_to_be64(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) md[i].len = cpu_to_be32(sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) md[i].key = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) * ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes descriptor fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) * @scmd: struct scsi_cmnd with the scatterlist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) * @vfc_cmd: vfc_cmd that contains the memory descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) * @dev: device for which to map dma memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) * 0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) struct ibmvfc_event *evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) struct ibmvfc_cmd *vfc_cmd, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) int sg_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) struct srp_direct_buf *data = &vfc_cmd->ioba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) struct ibmvfc_host *vhost = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) if (cls3_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) vfc_cmd->flags |= cpu_to_be16(IBMVFC_CLASS_3_ERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) sg_mapped = scsi_dma_map(scmd);
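	/* scsi_dma_map() returns the number of mapped segments: 0 means no data to transfer, negative means the mapping failed */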
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) if (!sg_mapped) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) vfc_cmd->flags |= cpu_to_be16(IBMVFC_NO_MEM_DESC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) } else if (unlikely(sg_mapped < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) scmd_printk(KERN_ERR, scmd, "Failed to map DMA buffer for command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) return sg_mapped;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) if (scmd->sc_data_direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) vfc_cmd->flags |= cpu_to_be16(IBMVFC_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) vfc_cmd->iu.add_cdb_len |= IBMVFC_WRDATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) vfc_cmd->flags |= cpu_to_be16(IBMVFC_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) vfc_cmd->iu.add_cdb_len |= IBMVFC_RDDATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
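	/* A single segment fits in the command's embedded descriptor; anything larger needs the external descriptor list below */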
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) if (sg_mapped == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) ibmvfc_map_sg_list(scmd, sg_mapped, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) vfc_cmd->flags |= cpu_to_be16(IBMVFC_SCATTERLIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) if (!evt->ext_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) evt->ext_list = dma_pool_alloc(vhost->sg_pool, GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) &evt->ext_list_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) if (!evt->ext_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) scsi_dma_unmap(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) scmd_printk(KERN_ERR, scmd, "Can't allocate memory for scatterlist\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) data->va = cpu_to_be64(evt->ext_list_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) data->len = cpu_to_be32(sg_mapped * sizeof(struct srp_direct_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) data->key = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) * ibmvfc_timeout - Internal command timeout handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) * @t: timer context used to fetch the ibmvfc event that timed out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) * Called when an internally generated command times out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static void ibmvfc_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) struct ibmvfc_event *evt = from_timer(evt, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * ibmvfc_send_event - Transforms event to u64 array and calls send_crq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * @evt: event to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) * @timeout: timeout in seconds - 0 means do not time command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * Returns the value returned from ibmvfc_send_crq(). (Zero for success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) static int ibmvfc_send_event(struct ibmvfc_event *evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) struct ibmvfc_host *vhost, unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) __be64 *crq_as_u64 = (__be64 *) &evt->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) /* Copy the IU into the transfer area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) *evt->xfer_iu = evt->iu;
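	/* Use the event pointer as the tag so the response can be matched back to this event */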
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) if (evt->crq.format == IBMVFC_CMD_FORMAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) evt->xfer_iu->cmd.tag = cpu_to_be64((u64)evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) else if (evt->crq.format == IBMVFC_MAD_FORMAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) evt->xfer_iu->mad_common.tag = cpu_to_be64((u64)evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) list_add_tail(&evt->queue, &vhost->sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) timer_setup(&evt->timer, ibmvfc_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) if (timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) evt->timer.expires = jiffies + (timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) add_timer(&evt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
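	/* Make sure the transfer IU is globally visible before handing the CRQ message to firmware */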
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) be64_to_cpu(crq_as_u64[1])))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) list_del(&evt->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) del_timer(&evt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) /* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) * Firmware will send a CRQ with a transport event (0xFF) to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) * tell this client what has happened to the transport. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) * will be handled in ibmvfc_handle_crq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) if (rc == H_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) if (evt->cmnd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) scsi_dma_unmap(evt->cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (evt->cmnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) evt->cmnd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) evt->done = ibmvfc_scsi_eh_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) evt->done(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) ibmvfc_trc_start(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) * ibmvfc_log_error - Log an error for the failed command if appropriate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) * @evt: ibmvfc event to log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static void ibmvfc_log_error(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) struct scsi_cmnd *cmnd = evt->cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) const char *err = unknown_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) int index = ibmvfc_get_err_index(be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) int logerr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) int rsp_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525)
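	/* A negative index means the status/error pair was not found in the cmd_status table */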
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) if (index >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) logerr = cmd_status[index].log;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) err = cmd_status[index].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) if (!logerr && (vhost->log_level <= (IBMVFC_DEFAULT_LOG_LEVEL + 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (rsp->flags & FCP_RSP_LEN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) rsp_code = rsp->data.info.rsp_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * ibmvfc_relogin - Log back into the specified device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * @sdev: scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static void ibmvfc_relogin(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct ibmvfc_host *vhost = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) if (rport == tgt->rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) ibmvfc_reinit_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * ibmvfc_scsi_done - Handle responses from commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) * @evt: ibmvfc event to be handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) * Used as a callback when sending scsi cmds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) struct ibmvfc_cmd *vfc_cmd = &evt->xfer_iu->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) struct ibmvfc_fcp_rsp *rsp = &vfc_cmd->rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) struct scsi_cmnd *cmnd = evt->cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) u32 rsp_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) u32 sense_len = be32_to_cpu(rsp->fcp_sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) if (cmnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) if (be16_to_cpu(vfc_cmd->response_flags) & IBMVFC_ADAPTER_RESID_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) scsi_set_resid(cmnd, be32_to_cpu(vfc_cmd->adapter_resid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) else if (rsp->flags & FCP_RESID_UNDER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) scsi_set_resid(cmnd, be32_to_cpu(rsp->fcp_resid));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) scsi_set_resid(cmnd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (vfc_cmd->status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) cmnd->result = ibmvfc_get_err_result(vfc_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
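/* Sense data follows any FCP response data; clamp the copy to the midlayer sense buffer */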
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) if (rsp->flags & FCP_RSP_LEN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) rsp_len = be32_to_cpu(rsp->fcp_rsp_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if ((sense_len + rsp_len) > SCSI_SENSE_BUFFERSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) sense_len = SCSI_SENSE_BUFFERSIZE - rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) if ((be16_to_cpu(vfc_cmd->status) & IBMVFC_VIOS_FAILURE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) (be16_to_cpu(vfc_cmd->error) == IBMVFC_PLOGI_REQUIRED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) ibmvfc_relogin(cmnd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) cmnd->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) ibmvfc_log_error(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) if (!cmnd->result &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) (scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) cmnd->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) scsi_dma_unmap(cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) cmnd->scsi_done(cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) if (evt->eh_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) complete(evt->eh_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) * ibmvfc_host_chkready - Check if the host can accept commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) * @vhost: struct ibmvfc host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) * 1 if host can accept command / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) switch (vhost->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) case IBMVFC_LINK_DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) case IBMVFC_HOST_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) case IBMVFC_NO_CRQ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) case IBMVFC_INITIALIZING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) case IBMVFC_HALTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) case IBMVFC_LINK_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) result = DID_REQUEUE << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) case IBMVFC_ACTIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) * ibmvfc_queuecommand - The queuecommand function of the scsi template
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) * @cmnd: struct scsi_cmnd to be executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) * @done: Callback function to be called when cmnd is completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) void (*done) (struct scsi_cmnd *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) struct ibmvfc_cmd *vfc_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) if (unlikely((rc = fc_remote_port_chkready(rport))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) unlikely((rc = ibmvfc_host_chkready(vhost)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) cmnd->result = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) done(cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) cmnd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) evt->cmnd = cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) cmnd->scsi_done = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) vfc_cmd = &evt->iu.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) memset(vfc_cmd, 0, sizeof(*vfc_cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) vfc_cmd->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) vfc_cmd->resp.len = cpu_to_be32(sizeof(vfc_cmd->rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) vfc_cmd->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) vfc_cmd->payload_len = cpu_to_be32(sizeof(vfc_cmd->iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) vfc_cmd->resp_len = cpu_to_be32(sizeof(vfc_cmd->rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) vfc_cmd->cancel_key = cpu_to_be32((unsigned long)cmnd->device->hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) vfc_cmd->tgt_scsi_id = cpu_to_be64(rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) vfc_cmd->iu.xfer_len = cpu_to_be32(scsi_bufflen(cmnd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) int_to_scsilun(cmnd->device->lun, &vfc_cmd->iu.lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) memcpy(vfc_cmd->iu.cdb, cmnd->cmnd, cmnd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
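/* Pass the block layer tag through and use a simple queue task attribute */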
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) if (cmnd->flags & SCMD_TAGGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) vfc_cmd->task_tag = cpu_to_be64(cmnd->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) vfc_cmd->iu.pri_task_attr = IBMVFC_SIMPLE_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) return ibmvfc_send_event(evt, vhost, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) if (rc == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) scmd_printk(KERN_ERR, cmnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) "Failed to map DMA buffer for command. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) cmnd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) done(cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) static DEF_SCSI_QCMD(ibmvfc_queuecommand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) * ibmvfc_sync_completion - Signal that a synchronous command has completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) static void ibmvfc_sync_completion(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) /* copy the response back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (evt->sync_iu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) *evt->sync_iu = *evt->xfer_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) complete(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) * ibmvfc_bsg_timeout_done - Completion handler for cancelling BSG commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) * @evt: struct ibmvfc_event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) vhost->aborting_passthru = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) dev_info(vhost->dev, "Passthru command cancelled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * ibmvfc_bsg_timeout - Handle a BSG timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * @job: struct bsg_job that timed out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) static int ibmvfc_bsg_timeout(struct bsg_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) unsigned long port_id = (unsigned long)job->dd_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) struct ibmvfc_tmf *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (vhost->aborting_passthru || vhost->state != IBMVFC_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) __ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) vhost->aborting_passthru = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) tmf = &evt->iu.tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) memset(tmf, 0, sizeof(*tmf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) tmf->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) tmf->common.length = cpu_to_be16(sizeof(*tmf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) tmf->scsi_id = cpu_to_be64(port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) tmf->cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) tmf->my_cancel_key = cpu_to_be32(IBMVFC_INTERNAL_CANCEL_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) rc = ibmvfc_send_event(evt, vhost, default_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) vhost->aborting_passthru = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * ibmvfc_bsg_plogi - PLOGI into a target to handle a BSG command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * @vhost: struct ibmvfc_host to send command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * @port_id: port ID to send command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) struct ibmvfc_port_login *plogi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) union ibmvfc_iu rsp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) int rc = 0, issue_login = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (tgt->scsi_id == port_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) issue_login = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) if (!issue_login)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (unlikely((rc = ibmvfc_host_chkready(vhost))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) plogi = &evt->iu.plogi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) memset(plogi, 0, sizeof(*plogi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) plogi->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) plogi->common.length = cpu_to_be16(sizeof(*plogi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) plogi->scsi_id = cpu_to_be64(port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) evt->sync_iu = &rsp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) init_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) rc = ibmvfc_send_event(evt, vhost, default_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) wait_for_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) if (rsp_iu.plogi.common.status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * ibmvfc_bsg_request - Handle a BSG request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) * @job: struct bsg_job to be executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) static int ibmvfc_bsg_request(struct bsg_job *job)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) struct ibmvfc_host *vhost = shost_priv(fc_bsg_to_shost(job));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) struct fc_rport *rport = fc_bsg_to_rport(job);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) struct ibmvfc_passthru_mad *mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) union ibmvfc_iu rsp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) unsigned long flags, port_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) struct fc_bsg_request *bsg_request = job->request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) struct fc_bsg_reply *bsg_reply = job->reply;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) unsigned int code = bsg_request->msgcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) int rc = 0, req_seg, rsp_seg, issue_login = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) u32 fc_flags, rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) bsg_reply->reply_payload_rcv_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) if (rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) port_id = rport->port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) switch (code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) case FC_BSG_HST_ELS_NOLOGIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) (bsg_request->rqst_data.h_els.port_id[1] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) bsg_request->rqst_data.h_els.port_id[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) case FC_BSG_RPT_ELS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) fc_flags = IBMVFC_FC_ELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) case FC_BSG_HST_CT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) issue_login = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) (bsg_request->rqst_data.h_ct.port_id[1] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) bsg_request->rqst_data.h_ct.port_id[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) case FC_BSG_RPT_CT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) fc_flags = IBMVFC_FC_CT_IU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) if (port_id == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) if (!mutex_trylock(&vhost->passthru_mutex))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) job->dd_data = (void *)port_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) req_seg = dma_map_sg(vhost->dev, job->request_payload.sg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) job->request_payload.sg_cnt, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) if (!req_seg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) mutex_unlock(&vhost->passthru_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) rsp_seg = dma_map_sg(vhost->dev, job->reply_payload.sg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) if (!rsp_seg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) job->request_payload.sg_cnt, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) mutex_unlock(&vhost->passthru_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
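/* The passthru MAD carries a single DMA descriptor for each direction */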
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) if (req_seg > 1 || rsp_seg > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (issue_login)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) rc = ibmvfc_bsg_plogi(vhost, port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) unlikely((rc = ibmvfc_host_chkready(vhost)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) mad = &evt->iu.passthru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) memset(mad, 0, sizeof(*mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) mad->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) mad->cmd_ioba.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) offsetof(struct ibmvfc_passthru_mad, iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) mad->iu.cmd_len = cpu_to_be32(job->request_payload.payload_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) mad->iu.rsp_len = cpu_to_be32(job->reply_payload.payload_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) mad->iu.flags = cpu_to_be32(fc_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) mad->iu.cancel_key = cpu_to_be32(IBMVFC_PASSTHRU_CANCEL_KEY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) mad->iu.cmd.va = cpu_to_be64(sg_dma_address(job->request_payload.sg_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) mad->iu.cmd.len = cpu_to_be32(sg_dma_len(job->request_payload.sg_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) mad->iu.rsp.va = cpu_to_be64(sg_dma_address(job->reply_payload.sg_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) mad->iu.rsp.len = cpu_to_be32(sg_dma_len(job->reply_payload.sg_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) mad->iu.scsi_id = cpu_to_be64(port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) mad->iu.tag = cpu_to_be64((u64)evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) rsp_len = be32_to_cpu(mad->iu.rsp.len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) evt->sync_iu = &rsp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) init_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) rc = ibmvfc_send_event(evt, vhost, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) wait_for_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) if (rsp_iu.passthru.common.status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) bsg_reply->reply_payload_rcv_len = rsp_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) bsg_reply->result = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) bsg_job_done(job, bsg_reply->result,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) bsg_reply->reply_payload_rcv_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) dma_unmap_sg(vhost->dev, job->request_payload.sg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) job->request_payload.sg_cnt, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) dma_unmap_sg(vhost->dev, job->reply_payload.sg_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) mutex_unlock(&vhost->passthru_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) * ibmvfc_reset_device - Reset the device with the specified reset type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) * @sdev: scsi device to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) * @type: reset type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) * @desc: reset type description for log messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) struct ibmvfc_host *vhost = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) struct ibmvfc_cmd *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) struct ibmvfc_event *evt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) union ibmvfc_iu rsp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) int rsp_rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) int rsp_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) if (vhost->state == IBMVFC_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028)
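/* Build the TMF as an FCP command frame with no data descriptors */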
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) tmf = &evt->iu.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) memset(tmf, 0, sizeof(*tmf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) int_to_scsilun(sdev->lun, &tmf->iu.lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) tmf->iu.tmf_flags = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) evt->sync_iu = &rsp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) init_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (rsp_rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) sdev_printk(KERN_ERR, sdev, "Failed to send %s reset event. rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) desc, rsp_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) sdev_printk(KERN_INFO, sdev, "Resetting %s\n", desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) wait_for_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) if (rsp_iu.cmd.status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) if (rsp_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (fc_rsp->flags & FCP_RSP_LEN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) rsp_code = fc_rsp->data.info.rsp_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) fc_rsp->scsi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) rsp_rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) sdev_printk(KERN_INFO, sdev, "%s reset successful\n", desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) return rsp_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) * ibmvfc_match_rport - Match function for specified remote port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) * @device: device to match (rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) * 1 if event matches rport / 0 if event does not match rport
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) struct fc_rport *cmd_rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) if (evt->cmnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) cmd_rport = starget_to_rport(scsi_target(evt->cmnd->device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) if (cmd_rport == rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) * ibmvfc_match_target - Match function for specified target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) * @device: device to match (starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) * 1 if event matches starget / 0 if event does not match starget
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) static int ibmvfc_match_target(struct ibmvfc_event *evt, void *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) if (evt->cmnd && scsi_target(evt->cmnd->device) == device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) * ibmvfc_match_lun - Match function for specified LUN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) * @device: device to match (sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) * 1 if event matches sdev / 0 if event does not match sdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (evt->cmnd && evt->cmnd->device == device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) * ibmvfc_wait_for_ops - Wait for ops to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) * @device: device to match (starget or sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) * @match: match function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) * SUCCESS / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) int (*match) (struct ibmvfc_event *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) DECLARE_COMPLETION_ONSTACK(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) int wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) list_for_each_entry(evt, &vhost->sent, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) if (match(evt, device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) evt->eh_comp = ∁
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) wait++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) timeout = wait_for_completion_timeout(&comp, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (!timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) list_for_each_entry(evt, &vhost->sent, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) if (match(evt, device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) evt->eh_comp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) wait++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) return wait ? FAILED : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) } while (wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) * ibmvfc_cancel_all - Cancel all outstanding commands to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) * @sdev: scsi device to cancel commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) * @type: type of error recovery being performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * This sends a cancel to the VIOS for the specified device. This does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) * NOT send any abort to the actual device. That must be done separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) struct ibmvfc_host *vhost = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) struct scsi_target *starget = scsi_target(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) struct fc_rport *rport = starget_to_rport(starget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) struct ibmvfc_tmf *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) struct ibmvfc_event *evt, *found_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) union ibmvfc_iu rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) int rsp_rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) u16 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) found_evt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) list_for_each_entry(evt, &vhost->sent, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (evt->cmnd && evt->cmnd->device == sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) found_evt = evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (!found_evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
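/* The cancel MAD can only be sent while logged into the VIOS */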
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (vhost->logged_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) tmf = &evt->iu.tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) memset(tmf, 0, sizeof(*tmf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) tmf->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) tmf->common.length = cpu_to_be16(sizeof(*tmf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) tmf->scsi_id = cpu_to_be64(rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) int_to_scsilun(sdev->lun, &tmf->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) if (!(be64_to_cpu(vhost->login_buf->resp.capabilities) & IBMVFC_CAN_SUPPRESS_ABTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) if (vhost->state == IBMVFC_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) evt->sync_iu = &rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) init_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (rsp_rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) /* If failure is received, the host adapter is most likely going
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) through reset, return success so the caller will wait for the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) being cancelled to get returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) wait_for_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) status = be16_to_cpu(rsp.mad_common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) if (status != IBMVFC_MAD_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) case IBMVFC_MAD_CRQ_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) /* Host adapter most likely going through reset, return success to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) the caller will wait for the command being cancelled to get returned */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) * ibmvfc_match_key - Match function for specified cancel key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) * @key: cancel key to match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) * 1 if event matches key / 0 if event does not match key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) static int ibmvfc_match_key(struct ibmvfc_event *evt, void *key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) unsigned long cancel_key = (unsigned long)key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) if (evt->crq.format == IBMVFC_CMD_FORMAT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) be32_to_cpu(evt->iu.cmd.cancel_key) == cancel_key)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) * ibmvfc_match_evt - Match function for specified event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) * @match: event to match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) * 1 if event matches key / 0 if event does not match key
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) static int ibmvfc_match_evt(struct ibmvfc_event *evt, void *match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) if (evt == match)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) * ibmvfc_abort_task_set - Abort outstanding commands to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) * @sdev: scsi device to abort commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) * This sends an Abort Task Set to the VIOS for the specified device. This does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) * NOT send any cancel to the VIOS. That must be done separately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) static int ibmvfc_abort_task_set(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) struct ibmvfc_host *vhost = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) struct ibmvfc_cmd *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) struct ibmvfc_event *evt, *found_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) union ibmvfc_iu rsp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) struct ibmvfc_fcp_rsp *fc_rsp = &rsp_iu.cmd.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) int rc, rsp_rc = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) int rsp_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) found_evt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) list_for_each_entry(evt, &vhost->sent, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) if (evt->cmnd && evt->cmnd->device == sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) found_evt = evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) if (!found_evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) sdev_printk(KERN_INFO, sdev, "No events found to abort\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (vhost->state == IBMVFC_ACTIVE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358)
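/*
 * Build an Abort Task Set TMF in SCSI command format. No memory
 * descriptors are attached, and the response is written back into this
 * event's rsp buffer via the response address set up below.
 */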
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) tmf = &evt->iu.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) memset(tmf, 0, sizeof(*tmf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) tmf->resp.va = cpu_to_be64(be64_to_cpu(evt->crq.ioba) + offsetof(struct ibmvfc_cmd, rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) tmf->resp.len = cpu_to_be32(sizeof(tmf->rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) tmf->frame_type = cpu_to_be32(IBMVFC_SCSI_FCP_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) tmf->payload_len = cpu_to_be32(sizeof(tmf->iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) tmf->resp_len = cpu_to_be32(sizeof(tmf->rsp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) tmf->tgt_scsi_id = cpu_to_be64(rport->port_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) int_to_scsilun(sdev->lun, &tmf->iu.lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) tmf->iu.tmf_flags = IBMVFC_ABORT_TASK_SET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) evt->sync_iu = &rsp_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) init_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) if (rsp_rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) sdev_printk(KERN_ERR, sdev, "Failed to send abort. rc=%d\n", rsp_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) sdev_printk(KERN_INFO, sdev, "Aborting outstanding commands\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) timeout = wait_for_completion_timeout(&evt->comp, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386)
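/*
 * The abort did not complete in time. Escalate: cancel all outstanding
 * commands for the device, and if that also fails, reset the host. If
 * the abort event itself is still outstanding after the reset, fall
 * back to a hard reset of the host.
 */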
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) if (!timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) rc = ibmvfc_cancel_all(sdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) if (rc == SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) sdev_printk(KERN_INFO, sdev, "Cancel failed, resetting host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) rsp_rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) if (rc == SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) rsp_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) if (rc != SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) ibmvfc_hard_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) rsp_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) if (rsp_iu.cmd.status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) if (rsp_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) if (fc_rsp->flags & FCP_RSP_LEN_VALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) rsp_code = fc_rsp->data.info.rsp_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) "flags: %x fcp_rsp: %x, scsi_status: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) fc_rsp->scsi_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) rsp_rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) sdev_printk(KERN_INFO, sdev, "Abort successful\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) return rsp_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) * ibmvfc_eh_abort_handler - Abort a command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) * @cmd: scsi command to abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) * SUCCESS / FAST_IO_FAIL / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) struct scsi_device *sdev = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) struct ibmvfc_host *vhost = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) int cancel_rc, block_rc, abort_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) int rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) block_rc = fc_block_scsi_eh(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) ibmvfc_wait_while_resetting(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) if (block_rc != FAST_IO_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) abort_rc = ibmvfc_abort_task_set(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461)
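/*
 * Only wait for the outstanding commands to complete if the cancel
 * (and the abort, when one was sent) was issued successfully.
 */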
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) if (!cancel_rc && !abort_rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (block_rc == FAST_IO_FAIL && rc != FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) rc = FAST_IO_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) * ibmvfc_eh_device_reset_handler - Reset a single LUN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) * @cmd: scsi command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) * SUCCESS / FAST_IO_FAIL / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) struct scsi_device *sdev = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) struct ibmvfc_host *vhost = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) int cancel_rc, block_rc, reset_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) int rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) block_rc = fc_block_scsi_eh(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) ibmvfc_wait_while_resetting(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) if (block_rc != FAST_IO_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) if (!cancel_rc && !reset_rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) if (block_rc == FAST_IO_FAIL && rc != FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) rc = FAST_IO_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) * @sdev: scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) * @data: return code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) unsigned long *rc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) * @sdev: scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) * @data: return code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) unsigned long *rc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_TGT_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) * ibmvfc_eh_target_reset_handler - Reset the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * @cmd: scsi command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) * SUCCESS / FAST_IO_FAIL / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) struct scsi_device *sdev = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) struct ibmvfc_host *vhost = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) struct scsi_target *starget = scsi_target(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) int block_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) int reset_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) int rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) unsigned long cancel_rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) block_rc = fc_block_scsi_eh(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) ibmvfc_wait_while_resetting(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) if (block_rc != FAST_IO_FAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (!cancel_rc && !reset_rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) if (block_rc == FAST_IO_FAIL && rc != FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) rc = FAST_IO_FAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) * ibmvfc_eh_host_reset_handler - Reset the connection to the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) * @cmd: struct scsi_cmnd having problems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) struct ibmvfc_host *vhost = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) dev_err(vhost->dev, "Resetting connection due to error recovery\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) rc = ibmvfc_issue_fc_host_lip(vhost->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) return rc ? FAILED : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) * ibmvfc_terminate_rport_io - Terminate all pending I/O to the rport.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) * @rport: rport struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) struct Scsi_Host *shost = rport_to_shost(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) struct fc_rport *dev_rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) unsigned long rc, flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) unsigned int found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) shost_for_each_device(sdev, shost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) dev_rport = starget_to_rport(scsi_target(sdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (dev_rport != rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
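/*
 * Wait for every command outstanding to this rport to complete. If they
 * do not, issue a LIP to force the link to recover.
 */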
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) if (rc == FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) ibmvfc_issue_fc_host_lip(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) if (tgt->scsi_id == rport->port_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) found++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) * If we get here, that means we previously attempted to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) * an implicit logout to the target but it failed, most likely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) * due to I/O being pending, so we need to send it again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) ibmvfc_reinit_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) static const struct ibmvfc_async_desc ae_desc[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) { "PLOGI", IBMVFC_AE_ELS_PLOGI, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) { "LOGO", IBMVFC_AE_ELS_LOGO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) { "PRLO", IBMVFC_AE_ELS_PRLO, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) { "N-Port SCN", IBMVFC_AE_SCN_NPORT, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) { "Group SCN", IBMVFC_AE_SCN_GROUP, IBMVFC_DEFAULT_LOG_LEVEL + 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) { "Domain SCN", IBMVFC_AE_SCN_DOMAIN, IBMVFC_DEFAULT_LOG_LEVEL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) { "Fabric SCN", IBMVFC_AE_SCN_FABRIC, IBMVFC_DEFAULT_LOG_LEVEL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) { "Link Up", IBMVFC_AE_LINK_UP, IBMVFC_DEFAULT_LOG_LEVEL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) { "Link Down", IBMVFC_AE_LINK_DOWN, IBMVFC_DEFAULT_LOG_LEVEL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) { "Link Dead", IBMVFC_AE_LINK_DEAD, IBMVFC_DEFAULT_LOG_LEVEL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) { "Halt", IBMVFC_AE_HALT, IBMVFC_DEFAULT_LOG_LEVEL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) { "Resume", IBMVFC_AE_RESUME, IBMVFC_DEFAULT_LOG_LEVEL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) { "Adapter Failed", IBMVFC_AE_ADAPTER_FAILED, IBMVFC_DEFAULT_LOG_LEVEL },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) static const struct ibmvfc_async_desc unknown_ae = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) "Unknown async", 0, IBMVFC_DEFAULT_LOG_LEVEL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) * ibmvfc_get_ae_desc - Get text description for async event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) * @ae: async event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) static const struct ibmvfc_async_desc *ibmvfc_get_ae_desc(u64 ae)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) for (i = 0; i < ARRAY_SIZE(ae_desc); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) if (ae_desc[i].ae == ae)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) return &ae_desc[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) return &unknown_ae;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) enum ibmvfc_ae_link_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) const char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) } link_desc[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) { IBMVFC_AE_LS_LINK_UP, " link up" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) { IBMVFC_AE_LS_LINK_BOUNCED, " link bounced" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) { IBMVFC_AE_LS_LINK_DOWN, " link down" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) { IBMVFC_AE_LS_LINK_DEAD, " link dead" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) * ibmvfc_get_link_state - Get text description for link state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) * @state: link state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) static const char *ibmvfc_get_link_state(enum ibmvfc_ae_link_state state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) for (i = 0; i < ARRAY_SIZE(link_desc); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) if (link_desc[i].state == state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) return link_desc[i].desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) return "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) * ibmvfc_handle_async - Handle an async event from the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) * @crq: crq to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) const struct ibmvfc_async_desc *desc = ibmvfc_get_ae_desc(be64_to_cpu(crq->event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) ibmvfc_log(vhost, desc->log_level, "%s event received. scsi_id: %llx, wwpn: %llx,"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) " node_name: %llx%s\n", desc->desc, be64_to_cpu(crq->scsi_id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) be64_to_cpu(crq->wwpn), be64_to_cpu(crq->node_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) ibmvfc_get_link_state(crq->link_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) switch (be64_to_cpu(crq->event)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) case IBMVFC_AE_RESUME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) switch (crq->link_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) case IBMVFC_AE_LS_LINK_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) case IBMVFC_AE_LS_LINK_DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) case IBMVFC_AE_LS_LINK_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) case IBMVFC_AE_LS_LINK_BOUNCED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) vhost->events_to_log |= IBMVFC_AE_LINKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) vhost->delay_init = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) __ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) case IBMVFC_AE_LINK_UP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) vhost->events_to_log |= IBMVFC_AE_LINKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) vhost->delay_init = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) __ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) case IBMVFC_AE_SCN_FABRIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) case IBMVFC_AE_SCN_DOMAIN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) vhost->events_to_log |= IBMVFC_AE_RSCN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) if (vhost->state < IBMVFC_HALTED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) vhost->delay_init = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) __ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) case IBMVFC_AE_SCN_NPORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) case IBMVFC_AE_SCN_GROUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) vhost->events_to_log |= IBMVFC_AE_RSCN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) ibmvfc_reinit_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) case IBMVFC_AE_ELS_LOGO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) case IBMVFC_AE_ELS_PRLO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) case IBMVFC_AE_ELS_PLOGI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) if (crq->scsi_id && cpu_to_be64(tgt->scsi_id) != crq->scsi_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) if (crq->wwpn && cpu_to_be64(tgt->ids.port_name) != crq->wwpn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) if (crq->node_name && cpu_to_be64(tgt->ids.node_name) != crq->node_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) if (tgt->need_login && be64_to_cpu(crq->event) == IBMVFC_AE_ELS_LOGO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) tgt->logo_rcvd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) if (!tgt->need_login || be64_to_cpu(crq->event) == IBMVFC_AE_ELS_PLOGI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) ibmvfc_reinit_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) case IBMVFC_AE_LINK_DOWN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) case IBMVFC_AE_ADAPTER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) case IBMVFC_AE_LINK_DEAD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) case IBMVFC_AE_HALT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) ibmvfc_link_down(vhost, IBMVFC_HALTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) dev_err(vhost->dev, "Unknown async event received: %lld\n", be64_to_cpu(crq->event));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) * ibmvfc_handle_crq - Handles and frees received events in the CRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) * @crq: Command/Response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) switch (crq->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) case IBMVFC_CRQ_INIT_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) switch (crq->format) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) case IBMVFC_CRQ_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) dev_info(vhost->dev, "Partner initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) /* Send back a response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) rc = ibmvfc_send_crq_init_complete(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) ibmvfc_init_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) case IBMVFC_CRQ_INIT_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) dev_info(vhost->dev, "Partner initialization complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) ibmvfc_init_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) dev_err(vhost->dev, "Unknown crq message type: %d\n", crq->format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) case IBMVFC_CRQ_XPORT_EVENT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) vhost->state = IBMVFC_NO_CRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) vhost->logged_in = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) if (crq->format == IBMVFC_PARTITION_MIGRATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) /* We need to re-setup the interpartition connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) vhost->client_migrated = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) ibmvfc_purge_requests(vhost, DID_REQUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) ibmvfc_purge_requests(vhost, DID_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) case IBMVFC_CRQ_CMD_RSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) if (crq->format == IBMVFC_ASYNC_EVENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) /* The only kind of payload CRQs we should get are responses to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) * things we send. Make sure this response is to something we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) * actually sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) crq->ioba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) if (unlikely(atomic_read(&evt->free))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) crq->ioba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
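/*
 * Valid response: stop the command's timer, remove the event from the
 * sent list, and run its completion handler.
 */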
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) del_timer(&evt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) list_del(&evt->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) ibmvfc_trc_end(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) evt->done(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) * ibmvfc_scan_finished - Check if the device scan is done.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) * @shost: scsi host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) * @time: current elapsed time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) * 0 if scan is not done / 1 if scan is done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (time >= (init_timeout * HZ)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) dev_info(vhost->dev, "Scan taking longer than %d seconds, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) "continuing initialization\n", init_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) if (vhost->scan_complete)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) return done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) * ibmvfc_slave_alloc - Setup the device's task set value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) * @sdev: struct scsi_device device to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) * Set the device's task set value so that error handling works as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) * expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) * 0 on success / -ENXIO if device does not exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) static int ibmvfc_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) struct Scsi_Host *shost = sdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) if (!rport || fc_remote_port_chkready(rport))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)
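/*
 * Hand each device a unique task_set value. It is stashed in hostdata
 * and later used as the cancel key when task management functions are
 * sent for this device.
 */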
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) sdev->hostdata = (void *)(unsigned long)vhost->task_set++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) * ibmvfc_target_alloc - Setup the target's task set value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) * @starget: struct scsi_target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) * Set the target's task set value so that error handling works as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) * expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) * 0 on success / -ENXIO if device does not exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) static int ibmvfc_target_alloc(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) starget->hostdata = (void *)(unsigned long)vhost->task_set++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) * ibmvfc_slave_configure - Configure the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) * @sdev: struct scsi_device device to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) * Enable allow_restart for a device if it is a disk. Adjust the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) * queue_depth here also.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) * 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) static int ibmvfc_slave_configure(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) struct Scsi_Host *shost = sdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
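/*
 * Disks get allow_restart so stopped devices are started again after
 * error recovery, along with a longer 120 second command timeout.
 */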
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) if (sdev->type == TYPE_DISK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) sdev->allow_restart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) * ibmvfc_change_queue_depth - Change the device's queue depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) * @sdev: scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) * @qdepth: depth to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) * actual depth set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) if (qdepth > IBMVFC_MAX_CMDS_PER_LUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) qdepth = IBMVFC_MAX_CMDS_PER_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) return scsi_change_queue_depth(sdev, qdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) return snprintf(buf, PAGE_SIZE, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) vhost->login_buf->resp.partition_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) static ssize_t ibmvfc_show_host_device_name(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) return snprintf(buf, PAGE_SIZE, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) vhost->login_buf->resp.device_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) return snprintf(buf, PAGE_SIZE, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) vhost->login_buf->resp.port_loc_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) return snprintf(buf, PAGE_SIZE, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) vhost->login_buf->resp.drc_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) return snprintf(buf, PAGE_SIZE, "%d\n", be32_to_cpu(vhost->login_buf->resp.version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) return snprintf(buf, PAGE_SIZE, "%llx\n", be64_to_cpu(vhost->login_buf->resp.capabilities));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) * ibmvfc_show_log_level - Show the adapter's error logging level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) * @dev: class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) * @buf: buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) * number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) static ssize_t ibmvfc_show_log_level(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->log_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) * ibmvfc_store_log_level - Change the adapter's error logging level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) * @dev: class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) * @buf: buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) * number of bytes consumed from buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) static ssize_t ibmvfc_store_log_level(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) vhost->log_level = simple_strtoul(buf, NULL, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) return strlen(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) ibmvfc_show_log_level, ibmvfc_store_log_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) #ifdef CONFIG_SCSI_IBMVFC_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) * ibmvfc_read_trace - Dump the adapter trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) * @filp: open sysfs file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) * @kobj: kobject struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) * @bin_attr: bin_attribute struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) * @buf: buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) * @off: offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) * @count: buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) * number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) static ssize_t ibmvfc_read_trace(struct file *filp, struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) struct bin_attribute *bin_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) char *buf, loff_t off, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) struct device *dev = container_of(kobj, struct device, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) struct ibmvfc_host *vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) int size = IBMVFC_TRACE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) char *src = (char *)vhost->trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) if (off > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) if (off + count > size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) size -= off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) count = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) spin_lock_irqsave(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) memcpy(buf, &src[off], count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) spin_unlock_irqrestore(shost->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) static struct bin_attribute ibmvfc_trace_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) .attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) .name = "trace",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) .size = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) .read = ibmvfc_read_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) static struct device_attribute *ibmvfc_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) &dev_attr_partition_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) &dev_attr_device_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) &dev_attr_port_loc_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) &dev_attr_drc_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) &dev_attr_npiv_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) &dev_attr_capabilities,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) &dev_attr_log_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) static struct scsi_host_template driver_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) .name = "IBM POWER Virtual FC Adapter",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) .proc_name = IBMVFC_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) .queuecommand = ibmvfc_queuecommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) .eh_timed_out = fc_eh_timed_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) .eh_abort_handler = ibmvfc_eh_abort_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) .eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) .eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) .eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) .slave_alloc = ibmvfc_slave_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) .slave_configure = ibmvfc_slave_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) .target_alloc = ibmvfc_target_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) .scan_finished = ibmvfc_scan_finished,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) .change_queue_depth = ibmvfc_change_queue_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) .cmd_per_lun = 16,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) .this_id = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) .sg_tablesize = SG_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) .max_sectors = IBMVFC_MAX_SECTORS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) .shost_attrs = ibmvfc_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) .track_queue_depth = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) * ibmvfc_next_async_crq - Returns the next entry in async queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) * Pointer to next entry in queue / NULL if empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) struct ibmvfc_async_crq *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) crq = &async_crq->msgs[async_crq->cur];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) if (crq->valid & 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) if (++async_crq->cur == async_crq->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) async_crq->cur = 0;
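		/* order the valid-bit check above before the caller reads the message body */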
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) crq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) return crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) * ibmvfc_next_crq - Returns the next entry in message queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) * Pointer to next entry in queue / NULL if empty
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) struct ibmvfc_crq_queue *queue = &vhost->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) struct ibmvfc_crq *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) crq = &queue->msgs[queue->cur];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) if (crq->valid & 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) if (++queue->cur == queue->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) queue->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) crq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) return crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) * ibmvfc_interrupt - Interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) * @irq: number of irq to handle, not used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) * @dev_instance: ibmvfc_host that received interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) * IRQ_HANDLED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) struct ibmvfc_host *vhost = (struct ibmvfc_host *)dev_instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234)
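	/* mask further interrupts and defer all CRQ processing to the tasklet */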
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) vio_disable_interrupts(to_vio_dev(vhost->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) tasklet_schedule(&vhost->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) * ibmvfc_tasklet - Interrupt handler tasklet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) * @data: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) * Nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) static void ibmvfc_tasklet(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) struct ibmvfc_host *vhost = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) struct vio_dev *vdev = to_vio_dev(vhost->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) struct ibmvfc_crq *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) struct ibmvfc_async_crq *async;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) int done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) while (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) /* Pull all the valid messages off the async CRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) ibmvfc_handle_async(async, vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) async->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) /* Pull all the valid messages off the CRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) ibmvfc_handle_crq(crq, vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) crq->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273)
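		/*
		 * Re-enable interrupts, then poll both queues once more: an
		 * entry that arrived while interrupts were still disabled may
		 * not raise a new interrupt, so it has to be picked up here.
		 */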
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) vio_enable_interrupts(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) vio_disable_interrupts(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) ibmvfc_handle_async(async, vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) async->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) } else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) vio_disable_interrupts(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) ibmvfc_handle_crq(crq, vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) crq->valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) * ibmvfc_init_tgt - Set the next init job step for the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) * @job_step: job step to perform
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) void (*job_step) (struct ibmvfc_target *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) tgt->job_step = job_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) wake_up(&tgt->vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) * ibmvfc_retry_tgt_init - Attempt to retry a step in target initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) * @job_step: initialization job step
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) * Returns: 1 if step will be retried / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) void (*job_step) (struct ibmvfc_target *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) wake_up(&tgt->vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) ibmvfc_init_tgt(tgt, job_step);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325)
/*
 * PRLI accept response codes, as defined in FC-LS.  Each entry maps the
 * 4-bit response code (service parameter page flags, bits 11:8) to whether
 * the PRLI should be retried and whether the target is considered logged in.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) int code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) int retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) int logged_in;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) } prli_rsp [] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) { 0, 1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) { 1, 0, 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) { 2, 1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) { 3, 1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) { 4, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) { 5, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) { 6, 0, 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) { 7, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) { 8, 1, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) * ibmvfc_get_prli_rsp - Find PRLI response index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) * @flags: PRLI response flags
 *
 * Return value:
 * 	index into prli_rsp[] for the response code encoded in @flags, or 0
 * 	if the code is not recognized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) static int ibmvfc_get_prli_rsp(u16 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) int code = (flags & 0x0f00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) for (i = 0; i < ARRAY_SIZE(prli_rsp); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) if (prli_rsp[i].code == code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) * ibmvfc_tgt_prli_done - Completion handler for Process Login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) struct ibmvfc_target *tgt = evt->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) u32 status = be16_to_cpu(rsp->common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) case IBMVFC_MAD_SUCCESS:
		tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
			parms->type, be16_to_cpu(parms->flags),
			be32_to_cpu(parms->service_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) if (parms->type == IBMVFC_SCSI_FCP_TYPE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) index = ibmvfc_get_prli_rsp(be16_to_cpu(parms->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) if (prli_rsp[index].logged_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) if (be16_to_cpu(parms->flags) & IBMVFC_PRLI_EST_IMG_PAIR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) tgt->need_login = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) tgt->ids.roles = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_TARGET_FUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) if (be32_to_cpu(parms->service_parms) & IBMVFC_PRLI_INITIATOR_FUNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) tgt->add_rport = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) } else if (prli_rsp[index].retry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) case IBMVFC_MAD_CRQ_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) default:
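		/*
		 * Retry with a fresh PLOGI if the VIOS indicates one is
		 * required or the target sent us a LOGO; retry the PRLI for
		 * transient errors; otherwise give up on this target.
		 */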
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) if ((be16_to_cpu(rsp->status) & IBMVFC_VIOS_FAILURE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) be16_to_cpu(rsp->error) == IBMVFC_PLOGI_REQUIRED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) else if (tgt->logo_rcvd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) * ibmvfc_tgt_send_prli - Send a process login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) struct ibmvfc_process_login *prli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439)
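	/* bound the number of discovery commands outstanding to the VIOS */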
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) if (vhost->discovery_threads >= disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) kref_get(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) vhost->discovery_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) evt->tgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) prli = &evt->iu.prli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) memset(prli, 0, sizeof(*prli));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) prli->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) prli->common.opcode = cpu_to_be32(IBMVFC_PROCESS_LOGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) prli->common.length = cpu_to_be16(sizeof(*prli));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) prli->scsi_id = cpu_to_be64(tgt->scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) prli->parms.type = IBMVFC_SCSI_FCP_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) prli->parms.flags = cpu_to_be16(IBMVFC_PRLI_EST_IMG_PAIR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) prli->parms.service_parms = cpu_to_be32(IBMVFC_PRLI_INITIATOR_FUNC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) if (cls3_error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) prli->parms.service_parms |= cpu_to_be32(IBMVFC_PRLI_RETRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) if (ibmvfc_send_event(evt, vhost, default_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) tgt_dbg(tgt, "Sent process login\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) * ibmvfc_tgt_plogi_done - Completion handler for Port Login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) struct ibmvfc_target *tgt = evt->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) u32 status = be16_to_cpu(rsp->common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) int level = IBMVFC_DEFAULT_LOG_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) case IBMVFC_MAD_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) tgt_dbg(tgt, "Port Login succeeded\n");
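		/*
		 * If a different WWPN now answers at this SCSI ID, the remote
		 * port has changed; flag a host reinit so discovery can
		 * rebuild the target list instead of updating it in place.
		 */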
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) if (tgt->ids.port_name &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) tgt->ids.port_name != wwn_to_u64(rsp->service_parms.port_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) vhost->reinit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) tgt_dbg(tgt, "Port re-init required\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) tgt->ids.port_id = tgt->scsi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) memcpy(&tgt->service_parms, &rsp->service_parms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) sizeof(tgt->service_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) sizeof(tgt->service_parms_change));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) case IBMVFC_MAD_CRQ_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) * ibmvfc_tgt_send_plogi - Send PLOGI to the specified target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) struct ibmvfc_port_login *plogi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) if (vhost->discovery_threads >= disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) kref_get(&tgt->kref);
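	/* starting a fresh login; forget any LOGO previously received from this target */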
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) tgt->logo_rcvd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) vhost->discovery_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) evt->tgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) plogi = &evt->iu.plogi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) memset(plogi, 0, sizeof(*plogi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) plogi->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) plogi->common.opcode = cpu_to_be32(IBMVFC_PORT_LOGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) plogi->common.length = cpu_to_be16(sizeof(*plogi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) plogi->scsi_id = cpu_to_be64(tgt->scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) if (ibmvfc_send_event(evt, vhost, default_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) tgt_dbg(tgt, "Sent port login\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) * ibmvfc_tgt_implicit_logout_done - Completion handler for Implicit Logout MAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) struct ibmvfc_target *tgt = evt->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) struct ibmvfc_implicit_logout *rsp = &evt->xfer_iu->implicit_logout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) u32 status = be16_to_cpu(rsp->common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) case IBMVFC_MAD_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) tgt_dbg(tgt, "Implicit Logout succeeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) * __ibmvfc_tgt_get_implicit_logout_evt - Allocate and init an event for implicit logout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) * @tgt: ibmvfc target struct
 * @done: completion handler to run when the MAD response arrives
 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) * Allocated and initialized ibmvfc_event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_target *tgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) void (*done) (struct ibmvfc_event *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) struct ibmvfc_implicit_logout *mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) kref_get(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) evt->tgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) mad = &evt->iu.implicit_logout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) memset(mad, 0, sizeof(*mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) mad->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) mad->common.opcode = cpu_to_be32(IBMVFC_IMPLICIT_LOGOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) mad->common.length = cpu_to_be16(sizeof(*mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) mad->old_scsi_id = cpu_to_be64(tgt->scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) return evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) * ibmvfc_tgt_implicit_logout - Initiate an Implicit Logout for specified target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) if (vhost->discovery_threads >= disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) vhost->discovery_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) ibmvfc_tgt_implicit_logout_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) if (ibmvfc_send_event(evt, vhost, default_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) tgt_dbg(tgt, "Sent Implicit Logout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) /**
 * ibmvfc_tgt_implicit_logout_and_del_done - Completion handler for Implicit Logout MAD when deleting a target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) struct ibmvfc_target *tgt = evt->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) u32 status = be16_to_cpu(mad->common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) * driver in which case we need to free up all the targets. If we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) * not unloading, we will still go through a hard reset to get out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) * offline state, so there is no need to track the old targets in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) * case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) /**
 * ibmvfc_tgt_implicit_logout_and_del - Implicitly log out the specified target and schedule its rport for deletion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) if (!vhost->logged_in) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) if (vhost->discovery_threads >= disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) vhost->discovery_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) evt = __ibmvfc_tgt_get_implicit_logout_evt(tgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) ibmvfc_tgt_implicit_logout_and_del_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) if (ibmvfc_send_event(evt, vhost, default_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) tgt_dbg(tgt, "Sent Implicit Logout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) * ibmvfc_tgt_move_login_done - Completion handler for Move Login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) struct ibmvfc_target *tgt = evt->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) u32 status = be16_to_cpu(rsp->common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) int level = IBMVFC_DEFAULT_LOG_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) case IBMVFC_MAD_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737) tgt->ids.port_id = tgt->scsi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) memcpy(&tgt->service_parms, &rsp->service_parms,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) sizeof(tgt->service_parms));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) sizeof(tgt->service_parms_change));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) case IBMVFC_MAD_CRQ_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) tgt_log(tgt, level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) "Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) * ibmvfc_tgt_move_login - Initiate a move login for specified target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) * @tgt: ibmvfc target struct
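 *
 * Used when a target's WWPN reappears under a new SCSI ID: the VIOS is asked
 * to move the existing login from the old ID to the new one, after which a
 * PRLI is still sent to re-establish the process login.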
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) struct ibmvfc_move_login *move;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) if (vhost->discovery_threads >= disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) kref_get(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) vhost->discovery_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) evt->tgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) move = &evt->iu.move_login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) memset(move, 0, sizeof(*move));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) move->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) move->common.length = cpu_to_be16(sizeof(*move));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) move->wwpn = cpu_to_be64(tgt->wwpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) move->node_name = cpu_to_be64(tgt->ids.node_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) if (ibmvfc_send_event(evt, vhost, default_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) * @mad: ibmvfc passthru mad struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) * 1 if PLOGI needed / 0 if PLOGI not needed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) static int ibmvfc_adisc_needs_plogi(struct ibmvfc_passthru_mad *mad,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) {
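	/*
	 * Words 2-3 of the ADISC accept carry the port name, words 4-5 the
	 * node name and word 6 the N_Port ID; a mismatch with what was saved
	 * at login time means the old login is stale.
	 */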
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) if (wwn_to_u64((u8 *)&mad->fc_iu.response[2]) != tgt->ids.port_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) if (wwn_to_u64((u8 *)&mad->fc_iu.response[4]) != tgt->ids.node_name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) if (be32_to_cpu(mad->fc_iu.response[6]) != tgt->scsi_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) * ibmvfc_tgt_adisc_done - Completion handler for ADISC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) struct ibmvfc_target *tgt = evt->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) struct ibmvfc_passthru_mad *mad = &evt->xfer_iu->passthru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) u32 status = be16_to_cpu(mad->common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) u8 fc_reason, fc_explain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) del_timer(&tgt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) case IBMVFC_MAD_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) tgt_dbg(tgt, "ADISC succeeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) if (ibmvfc_adisc_needs_plogi(mad, tgt))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) ibmvfc_del_tgt(tgt);
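		/* word 1 of a rejected ELS response holds the LS_RJT reason and explanation */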
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) fc_reason = (be32_to_cpu(mad->fc_iu.response[1]) & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) ibmvfc_get_fc_type(fc_reason), fc_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) * ibmvfc_init_passthru - Initialize an event struct for FC passthru
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) struct ibmvfc_passthru_mad *mad = &evt->iu.passthru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) memset(mad, 0, sizeof(*mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) mad->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) mad->common.opcode = cpu_to_be32(IBMVFC_PASSTHRU);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) mad->common.length = cpu_to_be16(sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu));
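/*
 * The MAD, its FC IU payload, and the response buffer all live in the same
 * DMA-mapped event structure, so each descriptor below is simply an offset
 * from the event's I/O bus address (evt->crq.ioba).
 */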
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) mad->cmd_ioba.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) offsetof(struct ibmvfc_passthru_mad, iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) mad->cmd_ioba.len = cpu_to_be32(sizeof(mad->iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) mad->iu.cmd_len = cpu_to_be32(sizeof(mad->fc_iu.payload));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) mad->iu.rsp_len = cpu_to_be32(sizeof(mad->fc_iu.response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) mad->iu.cmd.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) offsetof(struct ibmvfc_passthru_mad, fc_iu) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) offsetof(struct ibmvfc_passthru_fc_iu, payload));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) mad->iu.cmd.len = cpu_to_be32(sizeof(mad->fc_iu.payload));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) mad->iu.rsp.va = cpu_to_be64((u64)be64_to_cpu(evt->crq.ioba) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) offsetof(struct ibmvfc_passthru_mad, fc_iu) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) offsetof(struct ibmvfc_passthru_fc_iu, response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) mad->iu.rsp.len = cpu_to_be32(sizeof(mad->fc_iu.response));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) * ibmvfc_tgt_adisc_cancel_done - Completion handler when cancelling an ADISC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) * Just clean up this event struct. Everything else is handled by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) * the ADISC completion handler. If the ADISC never actually comes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) * back, we still have the timer running on the ADISC event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) * which will fire and cause the CRQ to get reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) struct ibmvfc_target *tgt = evt->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) tgt_dbg(tgt, "ADISC cancel complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) vhost->abort_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) * ibmvfc_adisc_timeout - Handle an ADISC timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) * If an ADISC times out, send a cancel. If the cancel times
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) * out, reset the CRQ. When the ADISC comes back as cancelled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) * log back into the target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) static void ibmvfc_adisc_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) struct ibmvfc_target *tgt = from_timer(tgt, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) struct ibmvfc_tmf *tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) tgt_dbg(tgt, "ADISC timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) spin_lock_irqsave(vhost->host->host_lock, flags);
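/*
 * Only attempt a cancel if this target is still waiting on its ADISC, the
 * host is still in discovery, and we have abort threads to spare. Otherwise
 * bail out and let the longer ADISC event timeout reset the CRQ.
 */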
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) if (vhost->abort_threads >= disc_threads ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) tgt->action != IBMVFC_TGT_ACTION_INIT_WAIT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) vhost->state != IBMVFC_INITIALIZING ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) vhost->action != IBMVFC_HOST_ACTION_QUERY_TGTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) vhost->abort_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) kref_get(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) evt->tgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) tmf = &evt->iu.tmf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) memset(tmf, 0, sizeof(*tmf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) tmf->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) tmf->common.length = cpu_to_be16(sizeof(*tmf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) tmf->scsi_id = cpu_to_be64(tgt->scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) tmf->cancel_key = cpu_to_be32(tgt->cancel_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) rc = ibmvfc_send_event(evt, vhost, default_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) vhost->abort_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) __ibmvfc_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) tgt_dbg(tgt, "Attempting to cancel ADISC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) * ibmvfc_tgt_adisc - Initiate an ADISC for specified target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) * When sending an ADISC we end up with two timers running. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) * first timer is the timer in the ibmvfc target struct. If this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) * fires, we send a cancel to the target. The second timer is the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) * timer on the ibmvfc event for the ADISC, which is longer. If that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) * fires, it means the ADISC timed out and our attempt to cancel it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) * also failed, so we need to reset the CRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) struct ibmvfc_passthru_mad *mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) if (vhost->discovery_threads >= disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) kref_get(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) vhost->discovery_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) evt->tgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) ibmvfc_init_passthru(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) mad = &evt->iu.passthru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999) mad->iu.flags = cpu_to_be32(IBMVFC_FC_ELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) mad->iu.scsi_id = cpu_to_be64(tgt->scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) mad->iu.cancel_key = cpu_to_be32(tgt->cancel_key);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002)
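/* ADISC payload: our WWPN in words 2-3, WWNN in words 4-5, 24-bit N_Port ID in word 6 */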
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) mad->fc_iu.payload[0] = cpu_to_be32(IBMVFC_ADISC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) memcpy(&mad->fc_iu.payload[2], &vhost->login_buf->resp.port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) sizeof(vhost->login_buf->resp.port_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) memcpy(&mad->fc_iu.payload[4], &vhost->login_buf->resp.node_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) sizeof(vhost->login_buf->resp.node_name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) mad->fc_iu.payload[6] = cpu_to_be32(be64_to_cpu(vhost->login_buf->resp.scsi_id) & 0x00ffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) if (timer_pending(&tgt->timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) add_timer(&tgt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) if (ibmvfc_send_event(evt, vhost, IBMVFC_ADISC_PLUS_CANCEL_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) del_timer(&tgt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) tgt_dbg(tgt, "Sent ADISC\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) * ibmvfc_tgt_query_target_done - Completion handler for Query Target MAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) struct ibmvfc_target *tgt = evt->tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) u32 status = be16_to_cpu(rsp->common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) int level = IBMVFC_DEFAULT_LOG_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) case IBMVFC_MAD_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) tgt_dbg(tgt, "Query Target succeeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045) if (be64_to_cpu(rsp->scsi_id) != tgt->scsi_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) case IBMVFC_MAD_CRQ_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) if ((be16_to_cpu(rsp->status) & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) be16_to_cpu(rsp->error) == IBMVFC_UNABLE_TO_PERFORM_REQ &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) be16_to_cpu(rsp->fc_explain) == IBMVFC_PORT_NAME_NOT_REG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) else if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) ibmvfc_del_tgt(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) * ibmvfc_tgt_query_target - Initiate a Query Target for specified target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) struct ibmvfc_query_tgt *query_tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) if (vhost->discovery_threads >= disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) kref_get(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) vhost->discovery_threads++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) evt->tgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) query_tgt = &evt->iu.query_tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) memset(query_tgt, 0, sizeof(*query_tgt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101) query_tgt->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) query_tgt->common.opcode = cpu_to_be32(IBMVFC_QUERY_TARGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) query_tgt->common.length = cpu_to_be16(sizeof(*query_tgt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) query_tgt->wwpn = cpu_to_be64(tgt->ids.port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) if (ibmvfc_send_event(evt, vhost, default_timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108) vhost->discovery_threads--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) tgt_dbg(tgt, "Sent Query Target\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) * ibmvfc_alloc_target - Allocate and initialize an ibmvfc target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) * @target: discover targets entry describing the SCSI ID and WWPN to allocate a target for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) struct ibmvfc_discover_targets_entry *target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) struct ibmvfc_target *stgt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) struct ibmvfc_target *wtgt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) u64 wwpn = be64_to_cpu(target->wwpn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) if (tgt->wwpn == wwpn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) wtgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) if (tgt->scsi_id == scsi_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) stgt = tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) if (wtgt && !stgt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151) * A WWPN target has moved and we are still tracking the old
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) * SCSI ID. The only way we should be able to get here is if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) * we attempted to send an implicit logout for the old SCSI ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) * and it failed for some reason, such as there being I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) * pending to the target. In this case, we will have already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) * deleted the rport from the FC transport so we do a move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) * login, which works even with I/O pending, as it will cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) * any active commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) * Do a move login here. The old target is no longer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) * known to the transport layer. We don't use the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164) * normal ibmvfc_set_tgt_action to set this, as we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) * don't normally want to allow this state change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) wtgt->old_scsi_id = wtgt->scsi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) wtgt->scsi_id = scsi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) wtgt->action = IBMVFC_TGT_ACTION_INIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) tgt_err(wtgt, "Unexpected target state: %d, %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) wtgt->action, wtgt->rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) } else if (stgt) {
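/* stgt was found by the second lookup loop above, so tgt == stgt here */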
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) if (tgt->need_login)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) goto unlock_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182)
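/*
 * No existing target matched. Allocate a new one with the host lock
 * dropped, since mempool_alloc() with GFP_NOIO may sleep.
 */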
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) memset(tgt, 0, sizeof(*tgt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) tgt->scsi_id = scsi_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) tgt->wwpn = wwpn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187) tgt->vhost = vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) tgt->need_login = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190) kref_init(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) tgt->cancel_key = vhost->task_set++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) list_add_tail(&tgt->queue, &vhost->targets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) unlock_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) * ibmvfc_alloc_targets - Allocate and initialize ibmvfc targets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) * ibmvfc_discover_targets_done - Completion handler for discover targets MAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) u32 mad_status = be16_to_cpu(rsp->common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) int level = IBMVFC_DEFAULT_LOG_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) switch (mad_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) case IBMVFC_MAD_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) vhost->num_targets = be32_to_cpu(rsp->num_written);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) level += ibmvfc_retry_host_init(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) * ibmvfc_discover_targets - Send Discover Targets MAD
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) struct ibmvfc_discover_targets *mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) mad = &evt->iu.discover_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266) memset(mad, 0, sizeof(*mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) mad->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) mad->common.opcode = cpu_to_be32(IBMVFC_DISC_TARGETS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) mad->common.length = cpu_to_be16(sizeof(*mad));
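/*
 * The firmware fills disc_buf with one entry per discovered port (24-bit
 * SCSI ID plus WWPN, as requested by IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
 * num_written in the response reports how many entries were returned.
 */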
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) if (!ibmvfc_send_event(evt, vhost, default_timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) ibmvfc_dbg(vhost, "Sent discover targets\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) * ibmvfc_npiv_login_done - Completion handler for NPIV Login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_login.common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) unsigned int npiv_max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) int level = IBMVFC_DEFAULT_LOG_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) switch (mad_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) case IBMVFC_MAD_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) if (ibmvfc_retry_cmd(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) level += ibmvfc_retry_host_init(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305) ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) case IBMVFC_MAD_CRQ_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) ibmvfc_retry_host_init(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) vhost->client_migrated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) if (!(be32_to_cpu(rsp->flags) & IBMVFC_NATIVE_FC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) be32_to_cpu(rsp->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) if (be32_to_cpu(rsp->max_cmds) <= IBMVFC_NUM_INTERNAL_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) be32_to_cpu(rsp->max_cmds));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) vhost->logged_in = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) npiv_max_sectors = min((uint)(be64_to_cpu(rsp->max_dma_len) >> 9), IBMVFC_MAX_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) rsp->partition_name, rsp->device_name, rsp->port_loc_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) rsp->drc_name, npiv_max_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345)
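/* Publish the attributes reported by the NPIV login response to the FC transport class */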
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) fc_host_fabric_name(vhost->host) = be64_to_cpu(rsp->node_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) fc_host_node_name(vhost->host) = be64_to_cpu(rsp->node_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) fc_host_port_name(vhost->host) = be64_to_cpu(rsp->port_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) fc_host_port_id(vhost->host) = be64_to_cpu(rsp->scsi_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) fc_host_port_type(vhost->host) = FC_PORTTYPE_NPIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) fc_host_supported_classes(vhost->host) = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) if (be32_to_cpu(rsp->service_parms.class1_parms[0]) & 0x80000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) if (be32_to_cpu(rsp->service_parms.class2_parms[0]) & 0x80000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) if (be32_to_cpu(rsp->service_parms.class3_parms[0]) & 0x80000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) fc_host_supported_classes(vhost->host) |= FC_COS_CLASS3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) fc_host_maxframe_size(vhost->host) =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) be16_to_cpu(rsp->service_parms.common.bb_rcv_sz) & 0x0fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) vhost->host->max_sectors = npiv_max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) wake_up(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) * ibmvfc_npiv_login - Sends NPIV login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372) static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) struct ibmvfc_npiv_login_mad *mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) ibmvfc_gather_partition_info(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) ibmvfc_set_login_info(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) ibmvfc_init_event(evt, ibmvfc_npiv_login_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380)
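/*
 * The login request was staged in vhost->login_info by ibmvfc_set_login_info();
 * copy it into the DMA-able login buffer that the MAD descriptor points at.
 */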
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) memcpy(vhost->login_buf, &vhost->login_info, sizeof(vhost->login_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) mad = &evt->iu.npiv_login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) memset(mad, 0, sizeof(struct ibmvfc_npiv_login_mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) mad->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_login_mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) mad->buffer.va = cpu_to_be64(vhost->login_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) mad->buffer.len = cpu_to_be32(sizeof(*vhost->login_buf));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) if (!ibmvfc_send_event(evt, vhost, default_timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) ibmvfc_dbg(vhost, "Sent NPIV login\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) * @evt: ibmvfc event struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) struct ibmvfc_host *vhost = evt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) u32 mad_status = be16_to_cpu(evt->xfer_iu->npiv_logout.common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) ibmvfc_free_event(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) switch (mad_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) case IBMVFC_MAD_SUCCESS:
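/*
 * A clean logout with no commands still outstanding means we can log
 * straight back in; anything else falls through to a hard reset.
 */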
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412) if (list_empty(&vhost->sent) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) ibmvfc_init_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) case IBMVFC_MAD_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) case IBMVFC_MAD_NOT_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) case IBMVFC_MAD_CRQ_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) case IBMVFC_MAD_DRIVER_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) ibmvfc_hard_reset_host(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) * ibmvfc_npiv_logout - Issue an NPIV Logout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) struct ibmvfc_npiv_logout_mad *mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) struct ibmvfc_event *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) evt = ibmvfc_get_event(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) mad = &evt->iu.npiv_logout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) memset(mad, 0, sizeof(*mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) mad->common.version = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) mad->common.opcode = cpu_to_be32(IBMVFC_NPIV_LOGOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) mad->common.length = cpu_to_be16(sizeof(struct ibmvfc_npiv_logout_mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) if (!ibmvfc_send_event(evt, vhost, default_timeout))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) ibmvfc_dbg(vhost, "Sent NPIV logout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) * ibmvfc_dev_init_to_do - Is there target initialization work to do?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) * 1 if work to do / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) if (tgt->action == IBMVFC_TGT_ACTION_INIT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) * ibmvfc_dev_logo_to_do - Is there target logout work to do?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) * 1 if work to do / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) static int ibmvfc_dev_logo_to_do(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) * __ibmvfc_work_to_do - Is there task level work to do? (no locking)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) * 1 if work to do / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) switch (vhost->action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) case IBMVFC_HOST_ACTION_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) case IBMVFC_HOST_ACTION_INIT_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) case IBMVFC_HOST_ACTION_LOGO_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) case IBMVFC_HOST_ACTION_TGT_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) case IBMVFC_HOST_ACTION_QUERY_TGTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) if (vhost->discovery_threads == disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) return 0;
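/*
 * There is work if any target still needs to be initialized. If everything
 * outstanding is merely waiting on a response, there is nothing to do yet;
 * once nothing is pending at all, return 1 so the host action can advance.
 */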
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) list_for_each_entry(tgt, &vhost->targets, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) if (tgt->action == IBMVFC_TGT_ACTION_INIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) list_for_each_entry(tgt, &vhost->targets, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) case IBMVFC_HOST_ACTION_TGT_DEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) if (vhost->discovery_threads == disc_threads)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) list_for_each_entry(tgt, &vhost->targets, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) list_for_each_entry(tgt, &vhost->targets, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) case IBMVFC_HOST_ACTION_LOGO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) case IBMVFC_HOST_ACTION_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538) case IBMVFC_HOST_ACTION_ALLOC_TGTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) case IBMVFC_HOST_ACTION_QUERY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) case IBMVFC_HOST_ACTION_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) case IBMVFC_HOST_ACTION_REENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) * ibmvfc_work_to_do - Is there task level work to do?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) * 1 if work to do / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) static int ibmvfc_work_to_do(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) rc = __ibmvfc_work_to_do(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) * ibmvfc_log_ae - Log async events if necessary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) * @events: events to log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) if (events & IBMVFC_AE_RSCN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_RSCN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) if ((events & IBMVFC_AE_LINKDOWN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) vhost->state >= IBMVFC_HALTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) if ((events & IBMVFC_AE_LINKUP) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) vhost->state == IBMVFC_INITIALIZING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) fc_host_post_event(vhost->host, fc_get_event_number(), FCH_EVT_LINKUP, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) * ibmvfc_tgt_add_rport - Tell the FC transport about a new remote port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) * @tgt: ibmvfc target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) struct ibmvfc_host *vhost = tgt->vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) struct fc_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) tgt_dbg(tgt, "Adding rport\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599)
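/*
 * fc_remote_port_add() was called without the host lock held, so the
 * target's state may have changed underneath us. If a delete was
 * requested in the meantime, undo the add before publishing the rport.
 */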
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) tgt_dbg(tgt, "Deleting rport\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) list_del(&tgt->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) fc_remote_port_delete(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) del_timer_sync(&tgt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) tgt->rport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) fc_remote_port_delete(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) if (rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) tgt_dbg(tgt, "rport add succeeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) tgt->rport = rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) rport->maxframe_size = be16_to_cpu(tgt->service_parms.common.bb_rcv_sz) & 0x0fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) rport->supported_classes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) tgt->target_id = rport->scsi_target_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) if (be32_to_cpu(tgt->service_parms.class1_parms[0]) & 0x80000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) rport->supported_classes |= FC_COS_CLASS1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) if (be32_to_cpu(tgt->service_parms.class2_parms[0]) & 0x80000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) rport->supported_classes |= FC_COS_CLASS2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) if (be32_to_cpu(tgt->service_parms.class3_parms[0]) & 0x80000000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) rport->supported_classes |= FC_COS_CLASS3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) if (rport->rqst_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) blk_queue_max_segments(rport->rqst_q, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) tgt_dbg(tgt, "rport add failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641) * ibmvfc_do_work - Do task level work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) static void ibmvfc_do_work(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) struct fc_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) ibmvfc_log_ae(vhost, vhost->events_to_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) vhost->events_to_log = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) switch (vhost->action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) case IBMVFC_HOST_ACTION_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) case IBMVFC_HOST_ACTION_LOGO_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) case IBMVFC_HOST_ACTION_INIT_WAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) case IBMVFC_HOST_ACTION_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) rc = ibmvfc_reset_crq(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) if (!rc || rc == H_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) vio_enable_interrupts(to_vio_dev(vhost->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) * The only action we could have changed to would have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) * been re-enable, in which case we skip the rest of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) * this path and wait until we've done the re-enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) * before sending the CRQ init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) case IBMVFC_HOST_ACTION_REENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) rc = ibmvfc_reenable_crq_queue(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) * The only action we could have changed to would have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) * been reset, in which case we skip the rest of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) * path and wait until we've done the reset before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) * sending the CRQ init.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) case IBMVFC_HOST_ACTION_LOGO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) vhost->job_step(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) case IBMVFC_HOST_ACTION_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) BUG_ON(vhost->state != IBMVFC_INITIALIZING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) if (vhost->delay_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) vhost->delay_init = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) ssleep(15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) vhost->job_step(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) case IBMVFC_HOST_ACTION_QUERY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) list_for_each_entry(tgt, &vhost->targets, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY_TGTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) case IBMVFC_HOST_ACTION_QUERY_TGTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) tgt->job_step(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) if (!ibmvfc_dev_init_to_do(vhost))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) case IBMVFC_HOST_ACTION_TGT_DEL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) if (tgt->action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) tgt->job_step(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) if (ibmvfc_dev_logo_to_do(vhost)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) tgt_dbg(tgt, "Deleting rport\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) rport = tgt->rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) tgt->rport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) list_del(&tgt->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DELETED_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) if (rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754) fc_remote_port_delete(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) del_timer_sync(&tgt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) rport = tgt->rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) tgt->rport = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) if (rport)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) fc_remote_port_delete(rport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) if (vhost->state == IBMVFC_INITIALIZING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) if (vhost->reinit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) vhost->reinit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774) scsi_block_requests(vhost->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) wake_up(&vhost->init_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) schedule_work(&vhost->rport_add_work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) vhost->init_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) scsi_unblock_requests(vhost->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) vhost->job_step = ibmvfc_discover_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) scsi_unblock_requests(vhost->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) wake_up(&vhost->init_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) case IBMVFC_HOST_ACTION_ALLOC_TGTS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_INIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) ibmvfc_alloc_targets(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) case IBMVFC_HOST_ACTION_TGT_INIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) if (tgt->action == IBMVFC_TGT_ACTION_INIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) tgt->job_step(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) if (!ibmvfc_dev_init_to_do(vhost))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) * ibmvfc_work - Do task level work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) * @data: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) * zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) static int ibmvfc_work(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) struct ibmvfc_host *vhost = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) set_user_nice(current, MIN_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) rc = wait_event_interruptible(vhost->work_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) ibmvfc_work_to_do(vhost));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841)
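/*
 * Kernel threads have signals disabled by default, so the
 * interruptible wait is not expected to return an error here.
 */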
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842) BUG_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) ibmvfc_do_work(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) * ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) * Allocates a page for messages, maps it for DMA, and registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) * the CRQ with the hypervisor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) * zero on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) int rc, retrc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) struct device *dev = vhost->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) struct vio_dev *vdev = to_vio_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) struct ibmvfc_crq_queue *crq = &vhost->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) if (!crq->msgs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) crq->size = PAGE_SIZE / sizeof(*crq->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) crq->msg_token = dma_map_single(dev, crq->msgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) if (dma_mapping_error(dev, crq->msg_token))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) goto map_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) crq->msg_token, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) if (rc == H_RESOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) /* We may be kexecing and the resource is still busy; try a reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) retrc = rc = ibmvfc_reset_crq(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) if (rc == H_CLOSED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) dev_warn(dev, "Partner adapter not ready\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) else if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) dev_warn(dev, "Error %d opening adapter\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) goto reg_crq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) retrc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) tasklet_init(&vhost->tasklet, (void *)ibmvfc_tasklet, (unsigned long)vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) if ((rc = request_irq(vdev->irq, ibmvfc_interrupt, 0, IBMVFC_NAME, vhost))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) goto req_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) if ((rc = vio_enable_interrupts(vdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) dev_err(dev, "Error %d enabling interrupts\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) goto req_irq_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) crq->cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) return retrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) req_irq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) tasklet_kill(&vhost->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) reg_crq_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) map_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) free_page((unsigned long)crq->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) return retrc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) * ibmvfc_free_mem - Free memory for vhost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) mempool_destroy(vhost->tgt_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) kfree(vhost->trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) dma_free_coherent(vhost->dev, vhost->disc_buf_sz, vhost->disc_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) vhost->disc_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) vhost->login_buf, vhost->login_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) dma_pool_destroy(vhost->sg_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) dma_unmap_single(vhost->dev, async_q->msg_token,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) free_page((unsigned long)async_q->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) * ibmvfc_alloc_mem - Allocate memory for vhost
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955) * @vhost: ibmvfc host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) * 0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) struct device *dev = vhost->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967) if (!async_q->msgs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) dev_err(dev, "Couldn't allocate async queue.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) goto nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) async_q->msg_token = dma_map_single(dev, async_q->msgs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) async_q->size * sizeof(*async_q->msgs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) if (dma_mapping_error(dev, async_q->msg_token)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) dev_err(dev, "Failed to map async queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) goto free_async_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982) vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) SG_ALL * sizeof(struct srp_direct_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) sizeof(struct srp_direct_buf), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) if (!vhost->sg_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) dev_err(dev, "Failed to allocate sg pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) goto unmap_async_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) vhost->login_buf = dma_alloc_coherent(dev, sizeof(*vhost->login_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) &vhost->login_buf_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) if (!vhost->login_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) dev_err(dev, "Couldn't allocate NPIV login buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) goto free_sg_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) &vhost->disc_buf_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) if (!vhost->disc_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) goto free_login_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) if (!vhost->trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) goto free_disc_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) sizeof(struct ibmvfc_target));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) if (!vhost->tgt_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) dev_err(dev, "Couldn't allocate target memory pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) goto free_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) free_trace:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) kfree(vhost->trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) free_disc_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) dma_free_coherent(dev, vhost->disc_buf_sz, vhost->disc_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) vhost->disc_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) free_login_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) dma_free_coherent(dev, sizeof(*vhost->login_buf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) vhost->login_buf, vhost->login_buf_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) free_sg_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) dma_pool_destroy(vhost->sg_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) unmap_async_crq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) dma_unmap_single(dev, async_q->msg_token,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) free_async_crq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) free_page((unsigned long)async_q->msgs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) * ibmvfc_rport_add_thread - Worker thread for rport adds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) * @work: work struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) static void ibmvfc_rport_add_thread(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) rport_add_work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054) struct ibmvfc_target *tgt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) struct fc_rport *rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057) int did_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) did_work = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) if (vhost->state != IBMVFC_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) list_for_each_entry(tgt, &vhost->targets, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) if (tgt->add_rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) did_work = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) tgt->add_rport = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) kref_get(&tgt->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) rport = tgt->rport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) if (!rport) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) ibmvfc_tgt_add_rport(tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) } else if (get_device(&rport->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) tgt_dbg(tgt, "Setting rport roles\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) fc_remote_port_rolechg(rport, tgt->ids.roles);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) put_device(&rport->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) kref_put(&tgt->kref, ibmvfc_release_tgt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) spin_lock_irqsave(vhost->host->host_lock, flags);
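/* The host lock was dropped above, so restart the targets list walk. */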
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) } while (did_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) if (vhost->state == IBMVFC_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) vhost->scan_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) * ibmvfc_probe - Adapter hot plug add entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) * @vdev: vio device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) * @id: vio device id struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) * 0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) struct ibmvfc_host *vhost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) struct device *dev = &vdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) int rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) if (!shost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) dev_err(dev, "Couldn't allocate host data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) shost->transportt = ibmvfc_transport_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) shost->can_queue = max_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) shost->max_lun = max_lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) shost->max_id = max_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) shost->max_sectors = IBMVFC_MAX_SECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) shost->unique_id = shost->host_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) vhost = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) INIT_LIST_HEAD(&vhost->sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) INIT_LIST_HEAD(&vhost->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) INIT_LIST_HEAD(&vhost->targets);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) sprintf(vhost->name, IBMVFC_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) vhost->host = shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) vhost->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) vhost->partition_number = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) vhost->log_level = log_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) vhost->task_set = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) strcpy(vhost->partition_name, "UNKNOWN");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) init_waitqueue_head(&vhost->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) init_waitqueue_head(&vhost->init_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) mutex_init(&vhost->passthru_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143) if ((rc = ibmvfc_alloc_mem(vhost)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) goto free_scsi_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) vhost->work_thread = kthread_run(ibmvfc_work, vhost, "%s_%d", IBMVFC_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) shost->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) if (IS_ERR(vhost->work_thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) dev_err(dev, "Couldn't create kernel thread: %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) PTR_ERR(vhost->work_thread));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) rc = PTR_ERR(vhost->work_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) goto free_host_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) if ((rc = ibmvfc_init_crq(vhost))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) goto kill_kthread;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) if ((rc = ibmvfc_init_event_pool(vhost))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) goto release_crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) if ((rc = scsi_add_host(shost, dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) goto release_event_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) if ((rc = ibmvfc_create_trace_file(&shost->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) &ibmvfc_trace_attr))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) goto remove_shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) if (shost_to_fc_host(shost)->rqst_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) dev_set_drvdata(dev, vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) spin_lock(&ibmvfc_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) list_add_tail(&vhost->queue, &ibmvfc_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) spin_unlock(&ibmvfc_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183)
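/*
 * Kick off the CRQ initialization handshake; the response is handled
 * asynchronously and the work thread drives the rest of adapter
 * bring-up (NPIV login and target discovery).
 */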
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) ibmvfc_send_crq_init(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) scsi_scan_host(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) remove_shost:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) scsi_remove_host(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) release_event_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) ibmvfc_free_event_pool(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) release_crq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) ibmvfc_release_crq_queue(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) kill_kthread:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) kthread_stop(vhost->work_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) free_host_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) ibmvfc_free_mem(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) free_scsi_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) scsi_host_put(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) * ibmvfc_remove - Adapter hot plug remove entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) * @vdev: vio device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) * 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) static int ibmvfc_remove(struct vio_dev *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) ibmvfc_remove_trace_file(&vhost->host->shost_dev.kobj, &ibmvfc_trace_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) ibmvfc_wait_while_resetting(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) ibmvfc_release_crq_queue(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) kthread_stop(vhost->work_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) fc_remove_host(vhost->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) scsi_remove_host(vhost->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) ibmvfc_purge_requests(vhost, DID_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) ibmvfc_free_event_pool(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) ibmvfc_free_mem(vhost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) spin_lock(&ibmvfc_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) list_del(&vhost->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) spin_unlock(&ibmvfc_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) scsi_host_put(vhost->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) * ibmvfc_resume - Resume from suspend
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) * @dev: device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248) * We may have lost an interrupt across suspend/resume, so kick the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) * interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) static int ibmvfc_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) struct ibmvfc_host *vhost = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) struct vio_dev *vdev = to_vio_dev(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) spin_lock_irqsave(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) vio_disable_interrupts(vdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) tasklet_schedule(&vhost->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) spin_unlock_irqrestore(vhost->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) * @vdev: vio device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) * Number of bytes the driver will need to DMA map at the same time in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) * order to perform well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) {
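/*
 * Rough estimate: the event pool IUs plus a worst case of 512KB of
 * mapped data for each command the midlayer may queue per LUN.
 */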
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) static const struct vio_device_id ibmvfc_device_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) {"fcp", "IBM,vfc-client"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) { "", "" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) static const struct dev_pm_ops ibmvfc_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) .resume = ibmvfc_resume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) static struct vio_driver ibmvfc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) .id_table = ibmvfc_device_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) .probe = ibmvfc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) .remove = ibmvfc_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) .get_desired_dma = ibmvfc_get_desired_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) .name = IBMVFC_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) .pm = &ibmvfc_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) static struct fc_function_template ibmvfc_transport_functions = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) .show_host_fabric_name = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) .show_host_node_name = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) .show_host_port_name = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) .show_host_supported_classes = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) .show_host_port_type = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) .show_host_port_id = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) .show_host_maxframe_size = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) .get_host_port_state = ibmvfc_get_host_port_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) .show_host_port_state = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) .get_host_speed = ibmvfc_get_host_speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) .show_host_speed = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) .issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) .terminate_rport_io = ibmvfc_terminate_rport_io,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) .show_rport_maxframe_size = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) .show_rport_supported_classes = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) .set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) .show_rport_dev_loss_tmo = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) .get_starget_node_name = ibmvfc_get_starget_node_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) .show_starget_node_name = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) .get_starget_port_name = ibmvfc_get_starget_port_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) .show_starget_port_name = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) .get_starget_port_id = ibmvfc_get_starget_port_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) .show_starget_port_id = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) .bsg_request = ibmvfc_bsg_request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) .bsg_timeout = ibmvfc_bsg_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) * ibmvfc_module_init - Initialize the ibmvfc module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) static int __init ibmvfc_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) if (!firmware_has_feature(FW_FEATURE_VIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) printk(KERN_INFO IBMVFC_NAME": IBM Virtual Fibre Channel Driver version: %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) IBMVFC_DRIVER_VERSION, IBMVFC_DRIVER_DATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) ibmvfc_transport_template = fc_attach_transport(&ibmvfc_transport_functions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) if (!ibmvfc_transport_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) rc = vio_register_driver(&ibmvfc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) fc_release_transport(ibmvfc_transport_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) * ibmvfc_module_exit - Teardown the ibmvfc module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) * nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) static void __exit ibmvfc_module_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) vio_unregister_driver(&ibmvfc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) fc_release_transport(ibmvfc_transport_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) module_init(ibmvfc_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) module_exit(ibmvfc_module_exit);