Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/* ------------------------------------------------------------
 * ibmvscsi.c
 * (C) Copyright IBM Corporation 1994, 2004
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *          Dave Boutcher (sleddog@us.ibm.com)
 *
 * ------------------------------------------------------------
 * Emulation of a SCSI host adapter for Virtual I/O devices
 *
 * This driver supports the SCSI adapter implemented by the IBM
 * Power5 firmware.  That SCSI adapter is not a physical adapter,
 * but allows Linux SCSI peripheral drivers to directly
 * access devices in another logical partition on the physical system.
 *
 * The virtual adapter(s) are present in the open firmware device
 * tree just like real adapters.
 *
 * One of the capabilities provided on these systems is the ability
 * to DMA between partitions.  The architecture states that for VSCSI,
 * the server side is allowed to DMA to and from the client.  The client
 * is never trusted to DMA to or from the server directly.
 *
 * Messages are sent between partitions on a "Command/Response Queue"
 * (CRQ), which is just a buffer of 16 byte entries in the receiver's
 * memory.  Senders cannot access the buffer directly, but send messages
 * by making a hypervisor call and passing in the 16 bytes.  The hypervisor
 * puts the message in the next 16 byte space in round-robin fashion,
 * turns on the high order bit of the message (the valid bit), and
 * generates an interrupt to the receiver (if interrupts are turned on.)
 * The receiver just turns off the valid bit when they have copied out
 * the message.
 *
 * The VSCSI client builds a SCSI RDMA Protocol (SRP) Information Unit
 * (IU) (as defined in the T10 standard available at www.t10.org), gets
 * a DMA address for the message, and sends it to the server as the
 * payload of a CRQ message.  The server DMAs the SRP IU and processes it,
 * including doing any additional data transfers.  When it is done, it
 * DMAs the SRP response back to the same address as the request came from,
 * and sends a CRQ message back to inform the client that the request has
 * completed.
 *
 * TODO: This is currently pretty tied to the IBM pSeries hypervisor
 * interfaces.  It would be really nice to abstract this above an RDMA
 * layer.
 */
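
/* For orientation, a sketch of the 16-byte CRQ entry this driver trades
 * with the hypervisor.  This mirrors struct viosrp_crq as declared in the
 * viosrp.h shipped with this kernel; treat the exact field names below as
 * illustrative rather than authoritative:
 *
 *	struct viosrp_crq {
 *		u8 valid;		// high bit set = entry in use
 *		u8 format;		// SRP vs out-of-band (MAD) payload
 *		u8 reserved;
 *		u8 status;		// non-zero indicates an error
 *		__be16 timeout;		// in seconds
 *		__be16 IU_length;	// length of the mapped SRP IU
 *		__be64 IU_data_ptr;	// DMA address of the SRP IU
 *	};
 */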

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/kthread.h>
#include <asm/firmware.h>
#include <asm/vio.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_srp.h>
#include "ibmvscsi.h"

/* The values below are somewhat arbitrary default values, but
 * OS/400 will use 3 busses (disks, CDs, tapes, I think.)
 * Note that there are 3 bits of channel value, 6 bits of id, and
 * 5 bits of LUN.
 */
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 300;
static int login_timeout = 60;
static int info_timeout = 30;
static int abort_timeout = 60;
static int reset_timeout = 60;
static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
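/* max_events is sized two above max_requests, presumably leaving headroom
 * for internal management datagrams (e.g. login and adapter-info MADs) to
 * be issued while the full complement of SCSI commands is outstanding.
 */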
static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static LIST_HEAD(ibmvscsi_head);
static DEFINE_SPINLOCK(ibmvscsi_driver_lock);

static struct scsi_transport_template *ibmvscsi_transport_template;

#define IBMVSCSI_VERSION "1.5.9"

MODULE_DESCRIPTION("IBM Virtual SCSI");
MODULE_AUTHOR("Dave Boutcher");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSI_VERSION);

module_param_named(max_id, max_id, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_id, "Largest ID value for each channel [Default=64]");
module_param_named(max_channel, max_channel, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_channel, "Largest channel value [Default=3]");
module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
module_param_named(max_requests, max_requests, int, S_IRUGO);
MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
module_param_named(client_reserve, client_reserve, int, S_IRUGO);
MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
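/* These knobs are set at module load time, for example (illustrative
 * values, not recommendations):
 *
 *	modprobe ibmvscsi max_requests=200 fast_fail=0
 *
 * The parameters declared with S_IWUSR above can also be changed at
 * runtime through /sys/module/ibmvscsi/parameters/.
 */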

static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
				struct ibmvscsi_host_data *hostdata);

/* ------------------------------------------------------------
 * Routines for managing the command/response queue
 */
/**
 * ibmvscsi_handle_event: - Interrupt handler for crq events
 * @irq:	number of irq to handle, not used
 * @dev_instance: ibmvscsi_host_data of host that received interrupt
 *
 * Disables interrupts and schedules srp_task
 * Always returns IRQ_HANDLED
 */
static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
{
	struct ibmvscsi_host_data *hostdata =
	    (struct ibmvscsi_host_data *)dev_instance;
	vio_disable_interrupts(to_vio_dev(hostdata->dev));
	tasklet_schedule(&hostdata->srp_task);
	return IRQ_HANDLED;
}

/**
 * ibmvscsi_release_crq_queue: - Deallocates data and unregisters CRQ
 * @queue:	crq_queue to be released
 * @hostdata:	ibmvscsi_host_data of host
 * @max_requests:	maximum requests (unused)
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 */
static void ibmvscsi_release_crq_queue(struct crq_queue *queue,
				       struct ibmvscsi_host_data *hostdata,
				       int max_requests)
{
	long rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
	free_irq(vdev->irq, (void *)hostdata);
	tasklet_kill(&hostdata->srp_task);
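	/* H_FREE_CRQ can return H_BUSY or a long-busy code while the
	 * hypervisor finishes tearing the queue down; back off and retry
	 * until it completes.
	 */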
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
	free_page((unsigned long)queue->msgs);
}

/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:	crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
	struct viosrp_crq *crq;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	crq = &queue->msgs[queue->cur];
	if (crq->valid != VIOSRP_CRQ_FREE) {
		if (++queue->cur == queue->size)
			queue->cur = 0;

		/* Ensure the read of the valid bit occurs before reading any
		 * other bits of the CRQ entry
		 */
		rmb();
	} else
		crq = NULL;
	spin_unlock_irqrestore(&queue->lock, flags);

	return crq;
}

/**
 * ibmvscsi_send_crq: - Send a CRQ
 * @hostdata:	the adapter
 * @word1:	the first 64 bits of the data
 * @word2:	the second 64 bits of the data
 */
static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
			     u64 word1, u64 word2)
{
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/*
	 * Ensure the command buffer is flushed to memory before handing it
	 * over to the VIOS to prevent it from fetching any stale data.
	 */
	mb();
	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

/**
 * ibmvscsi_task: - Process srps asynchronously
 * @data:	ibmvscsi_host_data of host
 */
static void ibmvscsi_task(void *data)
{
	struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);
	struct viosrp_crq *crq;
	int done = 0;

	while (!done) {
		/* Pull all the valid messages off the CRQ */
		while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
			ibmvscsi_handle_crq(crq, hostdata);
			crq->valid = VIOSRP_CRQ_FREE;
			wmb();
		}

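		/* Re-enable interrupts and then look at the queue again:
		 * an entry that arrived after the drain loop above but
		 * before interrupts were re-enabled would otherwise be
		 * stranded until the next interrupt.
		 */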
		vio_enable_interrupts(vdev);
		crq = crq_queue_next_crq(&hostdata->queue);
		if (crq != NULL) {
			vio_disable_interrupts(vdev);
			ibmvscsi_handle_crq(crq, hostdata);
			crq->valid = VIOSRP_CRQ_FREE;
			wmb();
		} else {
			done = 1;
		}
	}
}

static void gather_partition_info(void)
{
	const char *ppartition_name;
	const __be32 *p_number_ptr;

	/* Retrieve information about this partition */
	if (!of_root)
		return;

	of_node_get(of_root);

	ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
	if (ppartition_name)
		strlcpy(partition_name, ppartition_name,
				sizeof(partition_name));
	p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
	if (p_number_ptr)
		partition_number = of_read_number(p_number_ptr, 1);
	of_node_put(of_root);
}

static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
	memset(&hostdata->madapter_info, 0x00,
			sizeof(hostdata->madapter_info));

	dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
	strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);

	strncpy(hostdata->madapter_info.partition_name, partition_name,
			sizeof(hostdata->madapter_info.partition_name));

	hostdata->madapter_info.partition_number =
					cpu_to_be32(partition_number);

	hostdata->madapter_info.mad_version = cpu_to_be32(SRP_MAD_VERSION_1);
	hostdata->madapter_info.os_type = cpu_to_be32(SRP_MAD_OS_LINUX);
}

/**
 * ibmvscsi_reset_crq_queue: - resets a crq after a failure
 * @queue:	crq_queue to reset
 * @hostdata:	ibmvscsi_host_data of host
 */
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
				    struct ibmvscsi_host_data *hostdata)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	/* Close the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	/* Clean out the queue */
	memset(queue->msgs, 0x00, PAGE_SIZE);
	queue->cur = 0;

	set_adapter_info(hostdata);

	/* And re-open it again */
	rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == H_CLOSED) {
		/* Adapter is good, but other end is not ready */
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
	} else if (rc != 0) {
		dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
	}
	return rc;
}

/**
 * ibmvscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:	crq_queue to initialize and register
 * @hostdata:	ibmvscsi_host_data of host
 * @max_requests:	maximum requests (unused)
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */
static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
				   struct ibmvscsi_host_data *hostdata,
				   int max_requests)
{
	int rc;
	int retrc;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

	if (!queue->msgs)
		goto malloc_failed;
	queue->size = PAGE_SIZE / sizeof(*queue->msgs);

	queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
					  queue->size * sizeof(*queue->msgs),
					  DMA_BIDIRECTIONAL);

	if (dma_mapping_error(hostdata->dev, queue->msg_token))
		goto map_failed;

	gather_partition_info();
	set_adapter_info(hostdata);

	retrc = rc = plpar_hcall_norets(H_REG_CRQ,
				vdev->unit_address,
				queue->msg_token, PAGE_SIZE);
	if (rc == H_RESOURCE)
		/* maybe kexecing and resource is busy. try a reset */
		rc = ibmvscsi_reset_crq_queue(queue,
					      hostdata);

	if (rc == H_CLOSED) {
		/* Adapter is good, but other end is not ready */
		dev_warn(hostdata->dev, "Partner adapter not ready\n");
		retrc = 0;
	} else if (rc != 0) {
		dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
		goto reg_crq_failed;
	}

	queue->cur = 0;
	spin_lock_init(&queue->lock);

	tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
		     (unsigned long)hostdata);

	if (request_irq(vdev->irq,
			ibmvscsi_handle_event,
			0, "ibmvscsi", (void *)hostdata) != 0) {
		dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
			vdev->irq);
		goto req_irq_failed;
	}

	rc = vio_enable_interrupts(vdev);
	if (rc != 0) {
		dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
		goto req_irq_failed;
	}

	return retrc;

      req_irq_failed:
	tasklet_kill(&hostdata->srp_task);
	rc = 0;
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
	} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
	dma_unmap_single(hostdata->dev,
			 queue->msg_token,
			 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
	free_page((unsigned long)queue->msgs);
      malloc_failed:
	return -1;
}

/**
 * ibmvscsi_reenable_crq_queue: - reenables a crq after a failure
 * @queue:	crq_queue to reenable
 * @hostdata:	ibmvscsi_host_data of host
 */
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
				       struct ibmvscsi_host_data *hostdata)
{
	int rc = 0;
	struct vio_dev *vdev = to_vio_dev(hostdata->dev);

	set_adapter_info(hostdata);

	/* Re-enable the CRQ */
	do {
		if (rc)
			msleep(100);
		rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
	} while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

	if (rc)
		dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
	return rc;
}

/* ------------------------------------------------------------
 * Routines for the event pool and event structs
 */
/**
 * initialize_event_pool: - Allocates and initializes the event pool for a host
 * @pool:	event_pool to be initialized
 * @size:	Number of events in pool
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 *
 * Returns zero on success.
 */
static int initialize_event_pool(struct event_pool *pool,
				 int size, struct ibmvscsi_host_data *hostdata)
{
	int i;

	pool->size = size;
	pool->next = 0;
	pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
	if (!pool->events)
		return -ENOMEM;

	pool->iu_storage =
	    dma_alloc_coherent(hostdata->dev,
			       pool->size * sizeof(*pool->iu_storage),
			       &pool->iu_token, 0);
	if (!pool->iu_storage) {
		kfree(pool->events);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i) {
		struct srp_event_struct *evt = &pool->events[i];
		memset(&evt->crq, 0x00, sizeof(evt->crq));
		atomic_set(&evt->free, 1);
		evt->crq.valid = VIOSRP_CRQ_CMD_RSP;
		evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu));
		evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token +
			sizeof(*evt->xfer_iu) * i);
		evt->xfer_iu = pool->iu_storage + i;
		evt->hostdata = hostdata;
		evt->ext_list = NULL;
		evt->ext_list_token = 0;
	}

	return 0;
}

/**
 * release_event_pool: - Frees memory of an event pool of a host
 * @pool:	event_pool to be released
 * @hostdata:	ibmvscsi_host_data who owns the event pool
 */
static void release_event_pool(struct event_pool *pool,
			       struct ibmvscsi_host_data *hostdata)
{
	int i, in_use = 0;
	for (i = 0; i < pool->size; ++i) {
		if (atomic_read(&pool->events[i].free) != 1)
			++in_use;
		if (pool->events[i].ext_list) {
			dma_free_coherent(hostdata->dev,
				  SG_ALL * sizeof(struct srp_direct_buf),
				  pool->events[i].ext_list,
				  pool->events[i].ext_list_token);
		}
	}
	if (in_use)
		dev_warn(hostdata->dev, "releasing event pool with %d "
			 "events still in use?\n", in_use);
	kfree(pool->events);
	dma_free_coherent(hostdata->dev,
			  pool->size * sizeof(*pool->iu_storage),
			  pool->iu_storage, pool->iu_token);
}

/**
 * valid_event_struct: - Determines if event is valid.
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be checked for validity
 *
 * Returns zero if event is invalid, one otherwise.
 */
static int valid_event_struct(struct event_pool *pool,
				struct srp_event_struct *evt)
{
	int index = evt - pool->events;
	if (index < 0 || index >= pool->size)	/* outside of bounds */
		return 0;
	if (evt != pool->events + index)	/* unaligned */
		return 0;
	return 1;
}

/**
 * free_event_struct: - Changes status of event to "free"
 * @pool:	event_pool that contains the event
 * @evt:	srp_event_struct to be modified
 */
static void free_event_struct(struct event_pool *pool,
				       struct srp_event_struct *evt)
{
	if (!valid_event_struct(pool, evt)) {
		dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
			"(not in pool %p)\n", evt, pool->events);
		return;
	}
	if (atomic_inc_return(&evt->free) != 1) {
		dev_err(evt->hostdata->dev, "Freeing event_struct %p "
			"which is not in use!\n", evt);
		return;
	}
}

/**
 * get_event_struct: - Gets the next free event in pool
 * @pool:	event_pool that contains the events to be searched
 *
 * Returns the next event in "free" state, and NULL if none are free.
 * Note that no synchronization is done here, we assume the host_lock
 * will synchronize things.
 */
static struct srp_event_struct *get_event_struct(struct event_pool *pool)
{
	int i;
	int poolsize = pool->size;
	int offset = pool->next;

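	/* atomic_dec_if_positive() only moves a slot from free (1) to
	 * in-use (0) if it really was free, so concurrent callers cannot
	 * claim the same event.  Starting at pool->next spreads
	 * allocations round-robin across the pool.
	 */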
	for (i = 0; i < poolsize; i++) {
		offset = (offset + 1) % poolsize;
		if (!atomic_dec_if_positive(&pool->events[offset].free)) {
			pool->next = offset;
			return &pool->events[offset];
		}
	}

	printk(KERN_ERR "ibmvscsi: found no event struct in pool!\n");
	return NULL;
}

/**
 * init_event_struct: Initialize fields in an event struct that are always
 *                    required.
 * @evt_struct: The event
 * @done:       Routine to call when the event is responded to
 * @format:     SRP or MAD format
 * @timeout:    timeout value set in the CRQ
 */
static void init_event_struct(struct srp_event_struct *evt_struct,
			      void (*done) (struct srp_event_struct *),
			      u8 format,
			      int timeout)
{
	evt_struct->cmnd = NULL;
	evt_struct->cmnd_done = NULL;
	evt_struct->sync_srp = NULL;
	evt_struct->crq.format = format;
	evt_struct->crq.timeout = cpu_to_be16(timeout);
	evt_struct->done = done;
}

/* ------------------------------------------------------------
 * Routines for receiving SCSI responses from the hosting partition
 */

/**
 * set_srp_direction: Set the fields in the srp related to data
 *     direction and number of buffers based on the direction in
 *     the scsi_cmnd and the number of buffers
 */
static void set_srp_direction(struct scsi_cmnd *cmd,
			      struct srp_cmd *srp_cmd,
			      int numbuf)
{
	u8 fmt;

	if (numbuf == 0)
		return;

	if (numbuf == 1)
		fmt = SRP_DATA_DESC_DIRECT;
	else {
		fmt = SRP_DATA_DESC_INDIRECT;
		numbuf = min(numbuf, MAX_INDIRECT_BUFS);

		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			srp_cmd->data_out_desc_cnt = numbuf;
		else
			srp_cmd->data_in_desc_cnt = numbuf;
	}

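	/* buf_fmt packs two 4-bit descriptor formats: data-out in the
	 * high nibble, data-in in the low nibble.  E.g. a single-buffer
	 * write ends up as SRP_DATA_DESC_DIRECT << 4.
	 */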
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		srp_cmd->buf_fmt = fmt << 4;
	else
		srp_cmd->buf_fmt = fmt;
}

/**
 * unmap_cmd_data: - Unmap data pointed in srp_cmd based on the format
 * @cmd:	srp_cmd whose additional_data member will be unmapped
 * @evt_struct:	the event owning the command
 * @dev:	device for which the memory is mapped
 */
static void unmap_cmd_data(struct srp_cmd *cmd,
			   struct srp_event_struct *evt_struct,
			   struct device *dev)
{
	u8 out_fmt, in_fmt;

	out_fmt = cmd->buf_fmt >> 4;
	in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

	if (out_fmt == SRP_NO_DATA_DESC && in_fmt == SRP_NO_DATA_DESC)
		return;

	if (evt_struct->cmnd)
		scsi_dma_unmap(evt_struct->cmnd);
}

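/**
 * map_sg_list: - Fills in direct descriptors for a mapped scatterlist
 * @cmd:	struct scsi_cmnd whose scatterlist is already DMA-mapped
 * @nseg:	number of mapped segments to describe
 * @md:		array of direct descriptors to fill in
 *
 * Returns the total number of bytes described by the descriptors.
 */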
static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
		       struct srp_direct_buf *md)
{
	int i;
	struct scatterlist *sg;
	u64 total_length = 0;

	scsi_for_each_sg(cmd, sg, nseg, i) {
		struct srp_direct_buf *descr = md + i;
		descr->va = cpu_to_be64(sg_dma_address(sg));
		descr->len = cpu_to_be32(sg_dma_len(sg));
		descr->key = 0;
		total_length += sg_dma_len(sg);
	}
	return total_length;
}

/**
 * map_sg_data: - Maps dma for a scatterlist and initializes descriptor fields
 * @cmd:	struct scsi_cmnd with the scatterlist
 * @evt_struct:	srp_event_struct that carries the external descriptor list
 * @srp_cmd:	srp_cmd that contains the memory descriptor
 * @dev:	device for which to map dma memory
 *
 * Called by map_data_for_srp_cmd() when building srp cmd from scsi cmd.
 * Returns 1 on success.
 */
static int map_sg_data(struct scsi_cmnd *cmd,
		       struct srp_event_struct *evt_struct,
		       struct srp_cmd *srp_cmd, struct device *dev)
{
	int sg_mapped;
	u64 total_length = 0;
	struct srp_direct_buf *data =
		(struct srp_direct_buf *) srp_cmd->add_data;
	struct srp_indirect_buf *indirect =
		(struct srp_indirect_buf *) data;

	sg_mapped = scsi_dma_map(cmd);
	if (!sg_mapped)
		return 1;
	else if (sg_mapped < 0)
		return 0;

	set_srp_direction(cmd, srp_cmd, sg_mapped);

	/* special case; we can use a single direct descriptor */
	if (sg_mapped == 1) {
		map_sg_list(cmd, sg_mapped, data);
		return 1;
	}

	indirect->table_desc.va = 0;
	indirect->table_desc.len = cpu_to_be32(sg_mapped *
					       sizeof(struct srp_direct_buf));
	indirect->table_desc.key = 0;

	if (sg_mapped <= MAX_INDIRECT_BUFS) {
		total_length = map_sg_list(cmd, sg_mapped,
					   &indirect->desc_list[0]);
		indirect->len = cpu_to_be32(total_length);
		return 1;
	}

	/* get indirect table */
	if (!evt_struct->ext_list) {
		evt_struct->ext_list = (struct srp_direct_buf *)
			dma_alloc_coherent(dev,
					   SG_ALL * sizeof(struct srp_direct_buf),
					   &evt_struct->ext_list_token, 0);
		if (!evt_struct->ext_list) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				sdev_printk(KERN_ERR, cmd->device,
				            "Can't allocate memory "
				            "for indirect table\n");
			scsi_dma_unmap(cmd);
			return 0;
		}
	}

	total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);

	indirect->len = cpu_to_be32(total_length);
	indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token);
	indirect->table_desc.len = cpu_to_be32(sg_mapped *
					       sizeof(indirect->desc_list[0]));
	memcpy(indirect->desc_list, evt_struct->ext_list,
	       MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf));
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746)  * map_data_for_srp_cmd: - Calls functions to map data for srp cmds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  * @cmd:	struct scsi_cmnd with the memory to be mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747)  * @evt_struct:	srp_event_struct that owns the indirect descriptor table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748)  * @srp_cmd:	srp_cmd that contains the memory descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749)  * @dev:	dma device for which to map dma memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751)  * Called by ibmvscsi_queuecommand_lck() when converting scsi cmds to srp cmds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752)  * Returns 1 on success, 0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 				struct srp_event_struct *evt_struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 				struct srp_cmd *srp_cmd, struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	switch (cmd->sc_data_direction) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	case DMA_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	case DMA_BIDIRECTIONAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		sdev_printk(KERN_ERR, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			    "Can't map DMA_BIDIRECTIONAL to read/write\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		sdev_printk(KERN_ERR, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			    "Unknown data direction 0x%02x; can't map!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 			    cmd->sc_data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	return map_sg_data(cmd, evt_struct, srp_cmd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) }
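
/*
 * Editor's sketch: the same direction filter as a standalone predicate,
 * using the numeric values of enum dma_data_direction from
 * <linux/dma-direction.h> (BIDIRECTIONAL=0, TO_DEVICE=1, FROM_DEVICE=2,
 * NONE=3).  The return convention is hypothetical: 1 map, 0 skip, -1 reject.
 */
static int srp_direction_check(int dir)
{
	switch (dir) {
	case 1:	/* DMA_TO_DEVICE */
	case 2:	/* DMA_FROM_DEVICE */
		return 1;	/* map the scatterlist */
	case 3:	/* DMA_NONE */
		return 0;	/* no data phase; the command is still valid */
	default:
		return -1;	/* DMA_BIDIRECTIONAL (0) or garbage: reject */
	}
}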
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  * purge_requests: Our virtual adapter just shut down.  Purge any sent requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * @hostdata:    the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  * @error_code:  host byte (e.g. DID_ERROR) used to complete the purged commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct srp_event_struct *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	while (!list_empty(&hostdata->sent)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		evt = list_first_entry(&hostdata->sent, struct srp_event_struct, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		list_del(&evt->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		del_timer(&evt->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		if (evt->cmnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			evt->cmnd->result = (error_code << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			unmap_cmd_data(&evt->iu.srp.cmd, evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 				       evt->hostdata->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 			if (evt->cmnd_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 				evt->cmnd_done(evt->cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 		} else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			   evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			evt->done(evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		free_event_struct(&evt->hostdata->pool, evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) }
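
/*
 * Editor's sketch of the drain pattern above: the lock guarding the list is
 * dropped around each completion callback (which may re-enter the submit
 * path) and re-taken before the list is examined again.  A pthread mutex
 * stands in for the host lock; all names here are hypothetical.
 */
#include <pthread.h>

struct req { struct req *next; void (*done)(struct req *); };

static void drain_all(struct req **head, pthread_mutex_t *lock)
{
	pthread_mutex_lock(lock);
	while (*head) {
		struct req *r = *head;
		*head = r->next;		/* unlink while locked */
		pthread_mutex_unlock(lock);	/* callbacks run unlocked */
		if (r->done)
			r->done(r);
		pthread_mutex_lock(lock);	/* re-take before the next peek */
	}
	pthread_mutex_unlock(lock);
}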
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810)  * ibmvscsi_set_request_limit - Set the adapter request_limit in response to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811)  * an adapter failure, reset, or SRP Login. Done under host lock to prevent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812)  * race with SCSI command submission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * @hostdata:	adapter to adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  * @limit:	new request limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) static void ibmvscsi_set_request_limit(struct ibmvscsi_host_data *hostdata, int limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	atomic_set(&hostdata->request_limit, limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  * ibmvscsi_reset_host - Reset the connection to the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * @hostdata:	struct ibmvscsi_host_data to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	scsi_block_requests(hostdata->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	ibmvscsi_set_request_limit(hostdata, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	purge_requests(hostdata, DID_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	hostdata->action = IBMVSCSI_HOST_ACTION_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	wake_up(&hostdata->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840)  * ibmvscsi_timeout - Internal command timeout handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841)  * @evt_struct:	struct srp_event_struct that timed out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843)  * Called when an internally generated command times out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) static void ibmvscsi_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	dev_err(hostdata->dev, "Command timed out (%x). Resetting connection\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		evt_struct->iu.srp.cmd.opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	ibmvscsi_reset_host(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) }
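
/*
 * Editor's note: from_timer() above is container_of() specialised for
 * timers; because the timer is embedded in the event, the address of the
 * member recovers the enclosing structure.  A generic sketch with
 * hypothetical types:
 */
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct my_timer { unsigned long expires; };
struct my_event { int id; struct my_timer timer; };

static struct my_event *event_from_timer(struct my_timer *t)
{
	return container_of(t, struct my_event, timer);
}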
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) /* ------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * Routines for sending and receiving SRPs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * ibmvscsi_send_srp_event: - Transforms event to u64 array and calls ibmvscsi_send_crq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  * @evt_struct:	evt_struct to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  * @hostdata:	ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * @timeout:	timeout in seconds - 0 means do not time the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * Returns the value returned from ibmvscsi_send_crq(). (Zero for success)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * Note that this routine assumes that host_lock is held for synchronization.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 				   struct ibmvscsi_host_data *hostdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 				   unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	__be64 *crq_as_u64 = (__be64 *)&evt_struct->crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	int request_status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	int srp_req = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	/* If we have exhausted our request limit, just fail this request,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	 * unless it is for a reset or abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	 * Note that there are rare cases involving driver generated requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	 * (such as task management requests) where the mid layer may think
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	 * we can handle more requests (can_queue) than we actually can.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	if (evt_struct->crq.format == VIOSRP_SRP_FORMAT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		srp_req = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		request_status =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			atomic_dec_if_positive(&hostdata->request_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		/* If request limit was -1 when we started, it is now even
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		 * less than that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		if (request_status < -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			goto send_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		/* Otherwise: if the request limit was 0 when we started, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		 * adapter is still performing a login with the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		 * adapter, or we have simply run out of requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		else if (request_status == -1 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		         evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			goto send_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		/* Abort and reset calls should make it through.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		 * Nothing except abort and reset should use the last two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 		 * slots unless we had two or fewer to begin with.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		else if (request_status < 2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		         evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			/* In the case that we have less than two requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 			 * available, check the server limit as a combination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 			 * of the request limit and the number of requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			 * in-flight (the size of the send list).  If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 			 * server limit is greater than 2, return busy so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 			 * that the last two are reserved for reset and abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			int server_limit = request_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			struct srp_event_struct *tmp_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			list_for_each_entry(tmp_evt, &hostdata->sent, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 				server_limit++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			if (server_limit > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 				goto send_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	/* Copy the IU into the transfer area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	*evt_struct->xfer_iu = evt_struct->iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	evt_struct->xfer_iu->srp.rsp.tag = (u64)evt_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	/* Add this to the sent list.  We need to do this before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 * actually send, in case the response comes back really fast.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	list_add_tail(&evt_struct->list, &hostdata->sent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	if (timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		evt_struct->timer.expires = jiffies + (timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		add_timer(&evt_struct->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			       be64_to_cpu(crq_as_u64[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		list_del(&evt_struct->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 		del_timer(&evt_struct->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		 * Firmware will send a CRQ with a transport event (0xFF) to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		 * tell this client what has happened to the transport.  This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		 * will be handled in ibmvscsi_handle_crq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		if (rc == H_CLOSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			dev_warn(hostdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			         "send failed, receive queue closed, will retry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 			goto send_busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		dev_err(hostdata->dev, "send error %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		if (srp_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			atomic_inc(&hostdata->request_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		goto send_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966)  send_busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	free_event_struct(&hostdata->pool, evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (srp_req && request_status != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		atomic_inc(&hostdata->request_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  send_error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (evt_struct->cmnd != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		evt_struct->cmnd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		evt_struct->cmnd_done(evt_struct->cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	} else if (evt_struct->done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		evt_struct->done(evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	free_event_struct(&hostdata->pool, evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) }
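
/*
 * Editor's sketch of the credit accounting above, single-threaded and with
 * hypothetical names.  atomic_dec_if_positive() yields old-1 but only
 * stores non-negative results, so the counter never drops below 0 here even
 * though the returned status can.  The in-flight recount that the driver
 * performs for the status < 2 case is omitted for brevity.
 */
enum send_verdict { SEND_OK, SEND_BUSY, SEND_ERROR };

static enum send_verdict take_credit(int *limit, int is_login, int is_tsk_mgmt)
{
	int status = *limit - 1;

	if (status >= 0)
		*limit = status;	/* the dec_if_positive store */

	if (status < -1)
		return SEND_ERROR;	/* adapter already marked failed */
	if (status == -1)
		return is_login ? SEND_OK : SEND_BUSY;	/* login in progress */
	if (status < 2 && !is_tsk_mgmt)
		return SEND_BUSY;	/* keep 2 slots for abort/reset */
	return SEND_OK;
}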
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988)  * handle_cmd_rsp: -  Handle responses from commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989)  * @evt_struct:	srp_event_struct to be handled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  * Used as a callback when sending scsi cmds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  * Gets called by ibmvscsi_handle_crq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) static void handle_cmd_rsp(struct srp_event_struct *evt_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	struct srp_rsp *rsp = &evt_struct->xfer_iu->srp.rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	struct scsi_cmnd *cmnd = evt_struct->cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	if (unlikely(rsp->opcode != SRP_RSP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			dev_warn(evt_struct->hostdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				 "bad SRP RSP type %#02x\n", rsp->opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	if (cmnd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		cmnd->result |= rsp->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			memcpy(cmnd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			       rsp->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			       be32_to_cpu(rsp->sense_data_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		unmap_cmd_data(&evt_struct->iu.srp.cmd, 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			       evt_struct, 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			       evt_struct->hostdata->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		if (rsp->flags & SRP_RSP_FLAG_DOOVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			scsi_set_resid(cmnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 				       be32_to_cpu(rsp->data_out_res_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		else if (rsp->flags & SRP_RSP_FLAG_DIOVER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	if (evt_struct->cmnd_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		evt_struct->cmnd_done(cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) }
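
/*
 * Editor's sketch of the residual selection above.  The flag bit values are
 * placeholders, not the real SRP_RSP_FLAG_* constants, and the struct is a
 * host-endian stand-in for struct srp_rsp.
 */
struct rsp_model {
	unsigned int flags;
	unsigned int data_out_res_cnt;	/* data-out residual count */
	unsigned int data_in_res_cnt;	/* data-in residual count */
};

#define MODEL_DOOVER 0x1	/* placeholder for SRP_RSP_FLAG_DOOVER */
#define MODEL_DIOVER 0x2	/* placeholder for SRP_RSP_FLAG_DIOVER */

static unsigned int pick_residual(const struct rsp_model *r)
{
	if (r->flags & MODEL_DOOVER)
		return r->data_out_res_cnt;	/* write-side residual */
	if (r->flags & MODEL_DIOVER)
		return r->data_in_res_cnt;	/* read-side residual */
	return 0;				/* full transfer */
}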
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  * lun_from_dev: - Returns the lun of the scsi device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * @dev:	struct scsi_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static inline u16 lun_from_dev(struct scsi_device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	return (0x2 << 14) | (dev->id << 8) | (dev->channel << 5) | dev->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) }
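
/*
 * Editor's worked example of the LUN encoding above: an address-method code
 * of 0b10 in bits 15:14, target id in bits 13:8, channel in bits 7:5 and
 * the lun in bits 4:0.  For id=2, channel=0, lun=3 the packed value is
 * 0x8203.
 */
#include <assert.h>

static void lun_packing_example(void)
{
	unsigned int id = 2, channel = 0, lun = 3;
	unsigned int packed = (0x2 << 14) | (id << 8) | (channel << 5) | lun;

	assert(packed == 0x8203);
}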
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)  * ibmvscsi_queuecommand_lck: - The queuecommand function of the scsi template
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)  * @cmnd:	struct scsi_cmnd to be executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)  * @done:	Callback function to be called when cmnd is completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				 void (*done) (struct scsi_cmnd *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	struct srp_cmd *srp_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	struct srp_event_struct *evt_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	struct srp_indirect_buf *indirect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	u16 lun = lun_from_dev(cmnd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	u8 out_fmt, in_fmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	cmnd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	evt_struct = get_event_struct(&hostdata->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	if (!evt_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/* Set up the actual SRP IU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	srp_cmd = &evt_struct->iu.srp.cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	srp_cmd->opcode = SRP_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	int_to_scsilun(lun, &srp_cmd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		if (!firmware_has_feature(FW_FEATURE_CMO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			sdev_printk(KERN_ERR, cmnd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			            "couldn't convert cmd to srp_cmd\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		free_event_struct(&hostdata->pool, evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	init_event_struct(evt_struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			  handle_cmd_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			  VIOSRP_SRP_FORMAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 			  cmnd->request->timeout/HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	evt_struct->cmnd = cmnd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	evt_struct->cmnd_done = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	/* Fix up dma address of the buffer itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	indirect = (struct srp_indirect_buf *) srp_cmd->add_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	out_fmt = srp_cmd->buf_fmt >> 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	if ((in_fmt == SRP_DATA_DESC_INDIRECT ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	     out_fmt == SRP_DATA_DESC_INDIRECT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	    indirect->table_desc.va == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		indirect->table_desc.va =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			offsetof(struct srp_cmd, add_data) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			offsetof(struct srp_indirect_buf, desc_list));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
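
/*
 * Editor's sketch of the va fix-up above: when the descriptor list lives
 * inside the transferred IU itself, its bus address is the IU's DMA address
 * plus the offset of add_data within the SRP command plus the offset of
 * desc_list within the indirect buffer.  The struct layouts below are
 * simplified stand-ins, not the real SRP definitions.
 */
#include <stddef.h>

struct sketch_direct_buf { unsigned long long va; unsigned int key, len; };
struct sketch_indirect_buf {
	struct sketch_direct_buf table_desc;
	unsigned int len;
	struct sketch_direct_buf desc_list[10];
};
struct sketch_srp_cmd { unsigned char header[32]; unsigned char add_data[]; };

static unsigned long long embedded_desc_list_va(unsigned long long iu_dma)
{
	return iu_dma + offsetof(struct sketch_srp_cmd, add_data)
		      + offsetof(struct sketch_indirect_buf, desc_list);
}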
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static DEF_SCSI_QCMD(ibmvscsi_queuecommand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* ------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * Routines for driver initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  * map_persist_bufs: - Pre-map persistent data for adapter logins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * @hostdata:   ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  * Return 1 on error, 0 on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 					     sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 						     &hostdata->madapter_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 						     sizeof(hostdata->madapter_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 						     DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		dma_unmap_single(hostdata->dev, hostdata->caps_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 				 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
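
/*
 * Editor's sketch: map_persist_bufs() follows the usual unwind-on-failure
 * shape, where a later failure must undo every earlier success before
 * reporting.  The generic form with hypothetical stubs, using the same
 * 1-on-error convention:
 */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static void release_a(void) { }

static int setup_both(void)
{
	if (acquire_a())
		return 1;
	if (acquire_b()) {
		release_a();	/* undo the first mapping only */
		return 1;
	}
	return 0;
}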
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)  * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135)  * @hostdata:   ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137)  * Unmap the capabilities and adapter info DMA buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	dma_unmap_single(hostdata->dev, hostdata->caps_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			 sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			 sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  * login_rsp: - Handle response to SRP login request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  * @evt_struct:	srp_event_struct with the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)  * Used as a "done" callback when sending srp_login. Gets called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)  * by ibmvscsi_handle_crq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) static void login_rsp(struct srp_event_struct *evt_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 	switch (evt_struct->xfer_iu->srp.login_rsp.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	case SRP_LOGIN_RSP:	/* it worked! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	case SRP_LOGIN_REJ:	/* refused! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			 evt_struct->xfer_iu->srp.login_rej.reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		/* Login failed.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		ibmvscsi_set_request_limit(hostdata, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			evt_struct->xfer_iu->srp.login_rsp.opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		/* Login failed.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		ibmvscsi_set_request_limit(hostdata, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	hostdata->client_migrated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	/* Now we know what the real request-limit is.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	 * This value is set rather than added to request_limit because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	 * request_limit could have been set to -1 by this client.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	ibmvscsi_set_request_limit(hostdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		   be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	/* If we had any pending I/Os, kick them */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	hostdata->action = IBMVSCSI_HOST_ACTION_UNBLOCK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	wake_up(&hostdata->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)  * send_srp_login: - Sends the srp login
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  * @hostdata:	ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)  * Returns zero if successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static int send_srp_login(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	struct srp_login_req *login;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	BUG_ON(!evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	init_event_struct(evt_struct, login_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			  VIOSRP_SRP_FORMAT, login_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	login = &evt_struct->iu.srp.login_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	memset(login, 0, sizeof(*login));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	login->opcode = SRP_LOGIN_REQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 					 SRP_BUF_FORMAT_INDIRECT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	/* Start out with a request limit of 0, since this is negotiated in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	 * the login request we are just sending and login requests always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	 * get sent by the driver regardless of request_limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	ibmvscsi_set_request_limit(hostdata, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	dev_info(hostdata->dev, "sent SRP login\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)  * capabilities_rsp: - Handle response to MAD adapter capabilities request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)  * @evt_struct:	srp_event_struct with the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)  * Used as a "done" callback when sending the capabilities request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) static void capabilities_rsp(struct srp_event_struct *evt_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (evt_struct->xfer_iu->mad.capabilities.common.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 			evt_struct->xfer_iu->mad.capabilities.common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		if (hostdata->caps.migration.common.server_support !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		    cpu_to_be16(SERVER_SUPPORTS_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 			dev_info(hostdata->dev, "Partition migration not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		if (client_reserve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			if (hostdata->caps.reserve.common.server_support ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			    cpu_to_be16(SERVER_SUPPORTS_CAP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 				dev_info(hostdata->dev, "Client reserve enabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 				dev_info(hostdata->dev, "Client reserve not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	send_srp_login(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  * send_mad_capabilities: - Sends the mad capabilities request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  *      and stores the result in hostdata->caps, where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  *      capabilities_rsp() examines it when the response arrives
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)  * @hostdata:	ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	struct viosrp_capabilities *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	struct srp_event_struct *evt_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	struct device_node *of_node = hostdata->dev->of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	const char *location;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	evt_struct = get_event_struct(&hostdata->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	BUG_ON(!evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	init_event_struct(evt_struct, capabilities_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			  VIOSRP_MAD_FORMAT, info_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	req = &evt_struct->iu.mad.capabilities;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	memset(req, 0, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (hostdata->client_migrated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		sizeof(hostdata->caps.name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	location = of_get_property(of_node, "ibm,loc-code", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	location = location ? location : dev_name(hostdata->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	req->buffer = cpu_to_be64(hostdata->caps_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	hostdata->caps.migration.common.cap_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 				cpu_to_be32(MIGRATION_CAPABILITIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	hostdata->caps.migration.common.length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 				cpu_to_be16(sizeof(hostdata->caps.migration));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	hostdata->caps.migration.common.server_support =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 				cpu_to_be16(SERVER_SUPPORTS_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	hostdata->caps.migration.ecl = cpu_to_be32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	if (client_reserve) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		hostdata->caps.reserve.common.cap_type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 					cpu_to_be32(RESERVATION_CAPABILITIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		hostdata->caps.reserve.common.length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 				cpu_to_be16(sizeof(hostdata->caps.reserve));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		hostdata->caps.reserve.common.server_support =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 				cpu_to_be16(SERVER_SUPPORTS_CAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		hostdata->caps.reserve.type =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				cpu_to_be32(CLIENT_RESERVE_SCSI_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		req->common.length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 				cpu_to_be16(sizeof(hostdata->caps));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		req->common.length = cpu_to_be16(sizeof(hostdata->caps) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 						 sizeof(hostdata->caps.reserve));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * fast_fail_rsp: - Handle response to MAD enable fast fail
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * @evt_struct:	srp_event_struct with the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  * Used as a "done" callback when sending enable fast fail. Gets called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  * by ibmvscsi_handle_crq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) static void fast_fail_rsp(struct srp_event_struct *evt_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	if (status == VIOSRP_MAD_NOT_SUPPORTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 		dev_err(hostdata->dev, "fast_fail not supported in server\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	else if (status == VIOSRP_MAD_FAILED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		dev_err(hostdata->dev, "fast_fail request failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	else if (status != VIOSRP_MAD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	send_mad_capabilities(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  * enable_fast_fail - Send the MAD that enables fast fail, if requested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  * @hostdata:	ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)  * Returns zero if successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	struct viosrp_fast_fail *fast_fail_mad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	struct srp_event_struct *evt_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (!fast_fail) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		send_mad_capabilities(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	evt_struct = get_event_struct(&hostdata->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	BUG_ON(!evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	fast_fail_mad = &evt_struct->iu.mad.fast_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379)  * adapter_info_rsp: - Handle response to MAD adapter info request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)  * @evt_struct:	srp_event_struct with the response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)  * Used as a "done" callback when sending adapter_info. Gets called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)  * by ibmvscsi_handle_crq()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static void adapter_info_rsp(struct srp_event_struct *evt_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		dev_err(hostdata->dev, "error %d getting adapter info\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 			evt_struct->xfer_iu->mad.adapter_info.common.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		dev_info(hostdata->dev, "host srp version: %s, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			 "host partition %s (%d), OS %d, max io %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			 hostdata->madapter_info.srp_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 			 hostdata->madapter_info.partition_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 			 be32_to_cpu(hostdata->madapter_info.partition_number),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			 be32_to_cpu(hostdata->madapter_info.os_type),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 			 be32_to_cpu(hostdata->madapter_info.port_max_txu[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		if (hostdata->madapter_info.port_max_txu[0]) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			hostdata->host->max_sectors = 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 				be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		    strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 				hostdata->madapter_info.srp_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 			dev_err(hostdata->dev, "limiting scatterlists to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 				MAX_INDIRECT_BUFS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 			hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		if (be32_to_cpu(hostdata->madapter_info.os_type) == SRP_MAD_OS_AIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			enable_fast_fail(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	send_srp_login(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) }
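
/*
 * Editor's worked example of the max_sectors computation above:
 * port_max_txu[] is in bytes and max_sectors counts 512-byte units, hence
 * the shift by 9.  A 1 MiB server limit becomes 2048 sectors.
 */
#include <assert.h>

static void max_sectors_example(void)
{
	unsigned int port_max_txu = 1u << 20;		/* 1 MiB, in bytes */
	unsigned int max_sectors = port_max_txu >> 9;	/* 512-byte sectors */

	assert(max_sectors == 2048);
}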
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)  * send_mad_adapter_info: - Sends the mad adapter info request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  *      and stores the result so it can be retrieved with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  *      sysfs.  We COULD consider causing a failure if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)  *      returned SRP version doesn't match ours.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)  * @hostdata:	ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	struct viosrp_adapter_info *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	struct srp_event_struct *evt_struct;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	evt_struct = get_event_struct(&hostdata->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	BUG_ON(!evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	init_event_struct(evt_struct,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			  adapter_info_rsp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 			  VIOSRP_MAD_FORMAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			  info_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	req = &evt_struct->iu.mad.adapter_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	memset(req, 0x00, sizeof(*req));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	req->buffer = cpu_to_be64(hostdata->adapter_info_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)  * init_adapter: Start virtual adapter initialization sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)  * @hostdata:	ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) static void init_adapter(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	send_mad_adapter_info(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
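
/*
 * Editor's note: initialization is a chain of asynchronous "done" callbacks
 * rather than a blocking sequence: adapter info -> (AIX hosts only) enable
 * fast fail -> capabilities -> SRP login.  A sketch of the transitions,
 * with hypothetical names:
 */
enum init_step { STEP_ADAPTER_INFO, STEP_FAST_FAIL, STEP_CAPS, STEP_LOGIN };

static enum init_step next_step(enum init_step s, int aix, int fast_fail_on)
{
	switch (s) {
	case STEP_ADAPTER_INFO:
		if (!aix)
			return STEP_LOGIN;	/* non-AIX hosts skip the MADs */
		return fast_fail_on ? STEP_FAST_FAIL : STEP_CAPS;
	case STEP_FAST_FAIL:
		return STEP_CAPS;
	default:
		return STEP_LOGIN;
	}
}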
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469)  * sync_completion: Signal that a synchronous command has completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)  * Note that after returning from this call, the evt_struct is freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)  * the caller waiting on this completion shouldn't touch the evt_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)  * again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static void sync_completion(struct srp_event_struct *evt_struct)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	/* copy the response back */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	if (evt_struct->sync_srp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		*evt_struct->sync_srp = *evt_struct->xfer_iu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	complete(&evt_struct->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
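
/*
 * Usage sketch (illustrative, not part of the original source; it
 * mirrors the error handlers below): a caller that needs a synchronous
 * round trip points evt->sync_srp at a stack buffer, sends the event,
 * and blocks on the completion.  By the time sync_completion() has run,
 * the response IU has been copied into that buffer:
 *
 *	union viosrp_iu srp_rsp;
 *
 *	evt->sync_srp = &srp_rsp;
 *	init_completion(&evt->comp);
 *	if (ibmvscsi_send_srp_event(evt, hostdata, timeout) == 0) {
 *		wait_for_completion(&evt->comp);
 *		// srp_rsp now holds the copied-back response
 *	}
 */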
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484)  * ibmvscsi_eh_abort_handler: Abort a command...from scsi host template.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)  * Send this over to the server and wait synchronously for the response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	struct srp_tsk_mgmt *tsk_mgmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	struct srp_event_struct *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	struct srp_event_struct *tmp_evt, *found_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	union viosrp_iu srp_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	int rsp_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	u16 lun = lun_from_dev(cmd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	unsigned long wait_switch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	/* First, find this command in our sent list so we can figure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	 * out the correct tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	wait_switch = jiffies + (init_timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		found_evt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		list_for_each_entry(tmp_evt, &hostdata->sent, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			if (tmp_evt->cmnd == cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 				found_evt = tmp_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		if (!found_evt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		evt = get_event_struct(&hostdata->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		if (evt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 			sdev_printk(KERN_ERR, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 				"failed to allocate abort event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		init_event_struct(evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 				  sync_completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 				  VIOSRP_SRP_FORMAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 				  abort_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		tsk_mgmt = &evt->iu.srp.tsk_mgmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		/* Set up an abort SRP command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		tsk_mgmt->opcode = SRP_TSK_MGMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		int_to_scsilun(lun, &tsk_mgmt->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 		tsk_mgmt->task_tag = (u64) found_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		evt->sync_srp = &srp_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		init_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 		msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	} while (time_before(jiffies, wait_switch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	if (rsp_rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		sdev_printk(KERN_ERR, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			    "failed to send abort() event. rc=%d\n", rsp_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	sdev_printk(KERN_INFO, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 		    "aborting command. lun 0x%llx, tag 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		    (((u64) lun) << 48), (u64) found_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	wait_for_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	/* make sure we got a good response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 			sdev_printk(KERN_WARNING, cmd->device, "abort bad SRP RSP type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 				    srp_rsp.srp.rsp.opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		rsp_rc = srp_rsp.srp.rsp.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	if (rsp_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 			sdev_printk(KERN_WARNING, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 				    "abort code %d for task tag 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 				    rsp_rc, tsk_mgmt->task_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	/* Because we dropped the spinlock above, it's possible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	 * the event is no longer in our list.  Make sure it didn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	 * complete while we were aborting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	found_evt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	list_for_each_entry(tmp_evt, &hostdata->sent, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		if (tmp_evt->cmnd == cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 			found_evt = tmp_evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	if (found_evt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		sdev_printk(KERN_INFO, cmd->device, "aborted task tag 0x%llx completed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			    tsk_mgmt->task_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	sdev_printk(KERN_INFO, cmd->device, "successfully aborted task tag 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		    tsk_mgmt->task_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	cmd->result = (DID_ABORT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	list_del(&found_evt->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		       found_evt->hostdata->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	free_event_struct(&found_evt->hostdata->pool, found_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	atomic_inc(&hostdata->request_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)  * ibmvscsi_eh_device_reset_handler: Reset a single LUN...from scsi host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623)  * template.  Send this over to the server and wait synchronously for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624)  * the response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	struct srp_tsk_mgmt *tsk_mgmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	struct srp_event_struct *evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	struct srp_event_struct *tmp_evt, *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	union viosrp_iu srp_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	int rsp_rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 	u16 lun = lun_from_dev(cmd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	unsigned long wait_switch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	wait_switch = jiffies + (init_timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		evt = get_event_struct(&hostdata->pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		if (evt == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 			spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 			sdev_printk(KERN_ERR, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 				"failed to allocate reset event\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 		init_event_struct(evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 				  sync_completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 				  VIOSRP_SRP_FORMAT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 				  reset_timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		tsk_mgmt = &evt->iu.srp.tsk_mgmt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		/* Set up a lun reset SRP command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 		tsk_mgmt->opcode = SRP_TSK_MGMT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		int_to_scsilun(lun, &tsk_mgmt->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		evt->sync_srp = &srp_rsp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		init_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	} while (time_before(jiffies, wait_switch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	if (rsp_rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		sdev_printk(KERN_ERR, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			    "failed to send reset event. rc=%d\n", rsp_rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		    (((u64) lun) << 48));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	wait_for_completion(&evt->comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	/* make sure we got a good response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	if (unlikely(srp_rsp.srp.rsp.opcode != SRP_RSP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			sdev_printk(KERN_WARNING, cmd->device, "reset bad SRP RSP type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 				    srp_rsp.srp.rsp.opcode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	if (srp_rsp.srp.rsp.flags & SRP_RSP_FLAG_RSPVALID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		rsp_rc = *((int *)srp_rsp.srp.rsp.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		rsp_rc = srp_rsp.srp.rsp.status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	if (rsp_rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 			sdev_printk(KERN_WARNING, cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 				    "reset code %d for task tag 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 				    rsp_rc, tsk_mgmt->task_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	/* We need to find all commands for this LUN that have not yet been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	 * responded to, and fail them with DID_RESET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		if ((tmp_evt->cmnd) && (tmp_evt->cmnd->device == cmd->device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			tmp_evt->cmnd->result = (DID_RESET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 			list_del(&tmp_evt->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 			unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 				       tmp_evt->hostdata->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			free_event_struct(&tmp_evt->hostdata->pool, tmp_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 			atomic_inc(&hostdata->request_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			if (tmp_evt->cmnd_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 				tmp_evt->cmnd_done(tmp_evt->cmnd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			else if (tmp_evt->done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 				tmp_evt->done(tmp_evt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734)  * ibmvscsi_eh_host_reset_handler - Reset the connection to the server
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)  * @cmd:	struct scsi_cmnd having problems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	unsigned long wait_switch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	struct ibmvscsi_host_data *hostdata = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	dev_err(hostdata->dev, "Resetting connection due to error recovery\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	ibmvscsi_reset_host(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	for (wait_switch = jiffies + (init_timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	     time_before(jiffies, wait_switch) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	     atomic_read(&hostdata->request_limit) < 2;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 		msleep(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	if (atomic_read(&hostdata->request_limit) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)  * ibmvscsi_handle_crq: - Handles and frees received events in the CRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761)  * @crq:	Command/Response queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762)  * @hostdata:	ibmvscsi_host_data of host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 				struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	long rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	/* The hypervisor copies our tag value here so no byteswapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	struct srp_event_struct *evt_struct =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			(__force struct srp_event_struct *)crq->IU_data_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	switch (crq->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	case VIOSRP_CRQ_INIT_RSP:		/* initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		switch (crq->format) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		case VIOSRP_CRQ_INIT:	/* Initialization message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			dev_info(hostdata->dev, "partner initialized\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			/* Send back a response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 			if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 				/* Now login */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 				init_adapter(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 				dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		case VIOSRP_CRQ_INIT_COMPLETE:	/* Initialization response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 			dev_info(hostdata->dev, "partner initialization complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 			/* Now login */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 			init_adapter(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	case VIOSRP_CRQ_XPORT_EVENT:	/* Hypervisor telling us the connection is closed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		scsi_block_requests(hostdata->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		ibmvscsi_set_request_limit(hostdata, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		if (crq->format == 0x06) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 			/* We need to re-setup the interpartition connection */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 			dev_info(hostdata->dev, "Re-enabling adapter!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			hostdata->client_migrated = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 			purge_requests(hostdata, DID_REQUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			wake_up(&hostdata->work_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 			dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 				crq->format);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 			ibmvscsi_reset_host(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	case VIOSRP_CRQ_CMD_RSP:		/* real payload */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 		dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 			crq->valid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	/* The only kind of payload CRQs we should get are responses to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	 * things we send. Make sure this response is to something we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	 * actually sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	if (!valid_event_struct(&hostdata->pool, evt_struct)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 		       evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	if (atomic_read(&evt_struct->free)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 			evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (crq->format == VIOSRP_SRP_FORMAT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 			   &hostdata->request_limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	del_timer(&evt_struct->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	if ((crq->status != VIOSRP_OK && crq->status != VIOSRP_OK2) && evt_struct->cmnd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		evt_struct->cmnd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	if (evt_struct->done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		evt_struct->done(evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		dev_err(hostdata->dev, "returned done() is NULL; not running it!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	 * Lock the host_lock before messing with these structures, since we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	 * are running in a task context
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	spin_lock_irqsave(evt_struct->hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	list_del(&evt_struct->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	free_event_struct(&evt_struct->hostdata->pool, evt_struct);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) }
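
/*
 * Handshake sketch (illustrative, inferred from the code above and
 * from ibmvscsi_do_work() below): CRQ initialization is a two-message
 * exchange carried in the leading bytes of a 16-byte CRQ entry.
 *
 *	this partition                      partner partition
 *	  0xC001... (INIT)           ---->
 *	                             <----  0xC002... (INIT_COMPLETE)
 *
 * Whichever side receives INIT answers with INIT_COMPLETE, and once
 * either message arrives here the driver proceeds to the adapter-info
 * and SRP login sequence via init_adapter().
 */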
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)  * ibmvscsi_slave_configure: Set the "allow_restart" flag for each disk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863)  * @sdev:	struct scsi_device device to configure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)  * Enable allow_restart for a device if it is a disk, and extend the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866)  * block-layer request timeout, as required by the documentation for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)  * struct scsi_host_template.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) static int ibmvscsi_slave_configure(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	struct Scsi_Host *shost = sdev->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	spin_lock_irqsave(shost->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	if (sdev->type == TYPE_DISK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		sdev->allow_restart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) }
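
/*
 * Effect sketch (illustrative): for a TYPE_DISK device the timeout
 * change above is equivalent to writing the per-device sysfs knob by
 * hand, e.g. (hypothetical device name):
 *
 *	echo 120 > /sys/block/sda/device/timeout
 *
 * allow_restart additionally lets the SCSI midlayer issue START STOP
 * UNIT to spin a stopped virtual disk back up instead of failing I/O
 * outright.
 */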
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884)  * ibmvscsi_change_queue_depth - Change the device's queue depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885)  * @sdev:	scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)  * @qdepth:	depth to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)  * 	actual depth set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	return scsi_change_queue_depth(sdev, qdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) }
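
/*
 * Example (illustrative): requests beyond the adapter limit are
 * clamped, so if IBMVSCSI_MAX_CMDS_PER_LUN were 64,
 * ibmvscsi_change_queue_depth(sdev, 256) would set and return 64.
 */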
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) /* ------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900)  * sysfs attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) static ssize_t show_host_vhost_loc(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 				   struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		       hostdata->caps.loc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) static struct device_attribute ibmvscsi_host_vhost_loc = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		 .name = "vhost_loc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 		 .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	.show = show_host_vhost_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) static ssize_t show_host_vhost_name(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 				    struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 		       hostdata->caps.name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) static struct device_attribute ibmvscsi_host_vhost_name = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		 .name = "vhost_name",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		 .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 	.show = show_host_vhost_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) static ssize_t show_host_srp_version(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 				     struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	len = snprintf(buf, PAGE_SIZE, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 		       hostdata->madapter_info.srp_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) static struct device_attribute ibmvscsi_host_srp_version = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		 .name = "srp_version",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		 .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 	.show = show_host_srp_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) static ssize_t show_host_partition_name(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 					struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 					char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	len = snprintf(buf, PAGE_SIZE, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		       hostdata->madapter_info.partition_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) static struct device_attribute ibmvscsi_host_partition_name = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		 .name = "partition_name",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		 .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	.show = show_host_partition_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) static ssize_t show_host_partition_number(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 					  struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 					  char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	len = snprintf(buf, PAGE_SIZE, "%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		       be32_to_cpu(hostdata->madapter_info.partition_number));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) static struct device_attribute ibmvscsi_host_partition_number = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		 .name = "partition_number",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		 .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	.show = show_host_partition_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) static ssize_t show_host_mad_version(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 				     struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	len = snprintf(buf, PAGE_SIZE, "%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		       be32_to_cpu(hostdata->madapter_info.mad_version));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) static struct device_attribute ibmvscsi_host_mad_version = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		 .name = "mad_version",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		 .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	.show = show_host_mad_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) static ssize_t show_host_os_type(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 				 struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	len = snprintf(buf, PAGE_SIZE, "%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		       be32_to_cpu(hostdata->madapter_info.os_type));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static struct device_attribute ibmvscsi_host_os_type = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		 .name = "os_type",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		 .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	.show = show_host_os_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) static ssize_t show_host_config(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 				struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	return 0;	/* no adapter configuration data is exposed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) static struct device_attribute ibmvscsi_host_config = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		.name = "config",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		.mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 		},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	.show = show_host_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) };
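
/*
 * Userspace sketch (illustrative, not part of the driver): the
 * read-only attributes above appear under the SCSI host class, e.g.
 * /sys/class/scsi_host/host0/partition_name (the host number varies).
 * A minimal reader, assuming that path exists on the running system:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[64];
 *		FILE *f = fopen("/sys/class/scsi_host/host0/partition_name",
 *				"r");
 *
 *		if (f && fgets(line, sizeof(line), f))
 *			printf("partition: %s", line);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */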
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static int ibmvscsi_host_reset(struct Scsi_Host *shost, int reset_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	struct ibmvscsi_host_data *hostdata = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	dev_info(hostdata->dev, "Initiating adapter reset!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	ibmvscsi_reset_host(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) static struct device_attribute *ibmvscsi_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	&ibmvscsi_host_vhost_loc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	&ibmvscsi_host_vhost_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	&ibmvscsi_host_srp_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	&ibmvscsi_host_partition_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	&ibmvscsi_host_partition_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	&ibmvscsi_host_mad_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	&ibmvscsi_host_os_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	&ibmvscsi_host_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) /* ------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)  * SCSI driver registration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) static struct scsi_host_template driver_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	.module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	.name = "IBM POWER Virtual SCSI Adapter " IBMVSCSI_VERSION,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	.proc_name = "ibmvscsi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	.queuecommand = ibmvscsi_queuecommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 	.eh_timed_out = srp_timed_out,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	.eh_abort_handler = ibmvscsi_eh_abort_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	.eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	.eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	.slave_configure = ibmvscsi_slave_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	.change_queue_depth = ibmvscsi_change_queue_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	.host_reset = ibmvscsi_host_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	.cmd_per_lun = IBMVSCSI_CMDS_PER_LUN_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	.can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	.this_id = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	.sg_tablesize = SG_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	.shost_attrs = ibmvscsi_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)  * ibmvscsi_get_desired_dma - Calculate IO memory desired by the driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105)  * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)  *	Number of bytes of IO data the driver will need to perform well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	/* iu_storage data allocated in initialize_event_pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 	unsigned long desired_io = max_events * sizeof(union viosrp_iu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	/* add io space for sg data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	desired_io += (IBMVSCSI_MAX_SECTORS_DEFAULT * 512 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		       IBMVSCSI_CMDS_PER_LUN_DEFAULT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	return desired_io;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
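
/*
 * Worked example (illustrative, assuming the defaults in ibmvscsi.h:
 * 256 max sectors and 16 commands per LUN): the scatter/gather portion
 * alone comes to
 *
 *	256 sectors * 512 bytes * 16 commands = 2 MiB
 *
 * on top of max_events * sizeof(union viosrp_iu) for the IU storage.
 */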
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	char *action = "reset";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	switch (hostdata->action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	case IBMVSCSI_HOST_ACTION_UNBLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	case IBMVSCSI_HOST_ACTION_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		vio_enable_interrupts(to_vio_dev(hostdata->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	case IBMVSCSI_HOST_ACTION_REENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		action = "enable";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 			rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	case IBMVSCSI_HOST_ACTION_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 	hostdata->action = IBMVSCSI_HOST_ACTION_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 		ibmvscsi_set_request_limit(hostdata, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 		dev_err(hostdata->dev, "error after %s\n", action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 	scsi_unblock_requests(hostdata->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) static int __ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	switch (hostdata->action) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 	case IBMVSCSI_HOST_ACTION_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	case IBMVSCSI_HOST_ACTION_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	case IBMVSCSI_HOST_ACTION_REENABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	case IBMVSCSI_HOST_ACTION_UNBLOCK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) static int ibmvscsi_work_to_do(struct ibmvscsi_host_data *hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	spin_lock_irqsave(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 	rc = __ibmvscsi_work_to_do(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	spin_unlock_irqrestore(hostdata->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) static int ibmvscsi_work(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	struct ibmvscsi_host_data *hostdata = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 	set_user_nice(current, MIN_NICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		rc = wait_event_interruptible(hostdata->work_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 					      ibmvscsi_work_to_do(hostdata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		BUG_ON(rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 		ibmvscsi_do_work(hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) }
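
/*
 * Lifecycle sketch (illustrative): the worker is started by
 * ibmvscsi_probe() below via kthread_run() and torn down with
 * kthread_stop().  Interrupt-side code requests work by recording an
 * action and waking the queue, exactly as ibmvscsi_handle_crq() does
 * above:
 *
 *	hostdata->action = IBMVSCSI_HOST_ACTION_REENABLE;
 *	wake_up(&hostdata->work_wait_q);
 *
 * wait_event_interruptible() then returns, ibmvscsi_work_to_do() sees
 * the pending action, and ibmvscsi_do_work() services it.
 */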
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218)  * ibmvscsi_probe - Called by bus code for each adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219)  */
static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
	struct ibmvscsi_host_data *hostdata;
	struct Scsi_Host *host;
	struct device *dev = &vdev->dev;
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;
	unsigned long wait_switch = 0;
	int rc;

	dev_set_drvdata(&vdev->dev, NULL);

	host = scsi_host_alloc(&driver_template, sizeof(*hostdata));
	if (!host) {
		dev_err(&vdev->dev, "couldn't allocate host data\n");
		goto scsi_host_alloc_failed;
	}

	host->transportt = ibmvscsi_transport_template;
	hostdata = shost_priv(host);
	memset(hostdata, 0x00, sizeof(*hostdata));
	INIT_LIST_HEAD(&hostdata->sent);
	init_waitqueue_head(&hostdata->work_wait_q);
	hostdata->host = host;
	hostdata->dev = dev;
	ibmvscsi_set_request_limit(hostdata, -1);
	hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;

	if (map_persist_bufs(hostdata)) {
		dev_err(&vdev->dev, "couldn't map persistent buffers\n");
		goto persist_bufs_failed;
	}

	hostdata->work_thread = kthread_run(ibmvscsi_work, hostdata, "%s_%d",
					    "ibmvscsi", host->host_no);

	if (IS_ERR(hostdata->work_thread)) {
		dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
			PTR_ERR(hostdata->work_thread));
		goto init_crq_failed;
	}

	/*
	 * H_RESOURCE means the partner still holds a reference to our old
	 * queue; probing continues in that case and relies on the
	 * initialization handshake below, so only other failures are fatal.
	 */
	rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
	if (rc != 0 && rc != H_RESOURCE) {
		dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
		goto kill_kthread;
	}
	if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
		dev_err(&vdev->dev, "couldn't initialize event pool\n");
		goto init_pool_failed;
	}

	host->max_lun = IBMVSCSI_MAX_LUN;
	host->max_id = max_id;
	host->max_channel = max_channel;
	host->max_cmd_len = 16;

	dev_info(dev,
		 "Maximum ID: %d Maximum LUN: %llu Maximum Channel: %d\n",
		 host->max_id, host->max_lun, host->max_channel);

	if (scsi_add_host(hostdata->host, hostdata->dev))
		goto add_host_failed;

	/* we don't have a proper target_port_id so let's use the fake one */
	memcpy(ids.port_id, hostdata->madapter_info.partition_name,
	       sizeof(ids.port_id));
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(host, &ids);
	if (IS_ERR(rport))
		goto add_srp_port_failed;

	/* Try to send an initialization message.  Note that this is
	 * allowed to fail if the other end is not active.  In that case
	 * we don't want to scan.
	 */
	/* 0xC001... is an initialization CRQ message: the 0xC0 valid byte
	 * marks an init message and the 0x01 format byte requests
	 * initialization.
	 */
	if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
	    || rc == H_RESOURCE) {
		/*
		 * Wait around max init_timeout secs for the adapter to finish
		 * initializing. When we are done initializing, we will have a
		 * valid request_limit.  We don't want Linux scanning before
		 * we are ready.
		 */
		for (wait_switch = jiffies + (init_timeout * HZ);
		     time_before(jiffies, wait_switch) &&
		     atomic_read(&hostdata->request_limit) < 2;)
			msleep(10);

		/* if we now have a valid request_limit, initiate a scan */
		if (atomic_read(&hostdata->request_limit) > 0)
			scsi_scan_host(host);
	}

	dev_set_drvdata(&vdev->dev, hostdata);
	spin_lock(&ibmvscsi_driver_lock);
	list_add_tail(&hostdata->host_list, &ibmvscsi_head);
	spin_unlock(&ibmvscsi_driver_lock);
	return 0;

      add_srp_port_failed:
	scsi_remove_host(hostdata->host);
      add_host_failed:
	release_event_pool(&hostdata->pool, hostdata);
      init_pool_failed:
	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
      kill_kthread:
	kthread_stop(hostdata->work_thread);
      init_crq_failed:
	unmap_persist_bufs(hostdata);
      persist_bufs_failed:
	scsi_host_put(host);
      scsi_host_alloc_failed:
	return -1;
}

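/*
 * The labels above form the usual unwind ladder: each label releases only
 * what had been acquired before the failing step, in reverse order of
 * acquisition.  A minimal sketch of the idiom, with hypothetical resources
 * a and b that are not part of this driver:
 *
 *	static int example_probe(void)
 *	{
 *		if (acquire_a())
 *			goto err_a;
 *		if (acquire_b())
 *			goto err_b;
 *		return 0;
 *	err_b:
 *		release_a();
 *	err_a:
 *		return -1;
 *	}
 */
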
/* Tear down an adapter, undoing what ibmvscsi_probe() set up. */
static int ibmvscsi_remove(struct vio_dev *vdev)
{
	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);

	srp_remove_host(hostdata->host);
	scsi_remove_host(hostdata->host);

	purge_requests(hostdata, DID_ERROR);
	release_event_pool(&hostdata->pool, hostdata);

	ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);

	kthread_stop(hostdata->work_thread);
	unmap_persist_bufs(hostdata);

	spin_lock(&ibmvscsi_driver_lock);
	list_del(&hostdata->host_list);
	spin_unlock(&ibmvscsi_driver_lock);

	scsi_host_put(hostdata->host);

	return 0;
}

/**
 * ibmvscsi_resume - Resume from suspend
 * @dev:	device struct
 *
 * We may have lost an interrupt across suspend/resume, so kick the
 * interrupt handler.
 */
static int ibmvscsi_resume(struct device *dev)
{
	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
	vio_disable_interrupts(to_vio_dev(hostdata->dev));
	tasklet_schedule(&hostdata->srp_task);

	return 0;
}

/**
 * ibmvscsi_device_table: Used by vio.c to match devices in the device
 * tree that we support.
 */
static const struct vio_device_id ibmvscsi_device_table[] = {
	{"vscsi", "IBM,v-scsi"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

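/*
 * A device-tree node that this table would match looks roughly like the
 * sketch below (an illustrative example, not taken from a real system):
 *
 *	v-scsi@30000003 {
 *		device_type = "vscsi";
 *		compatible = "IBM,v-scsi";
 *		reg = <0x30000003>;
 *	};
 */
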
static const struct dev_pm_ops ibmvscsi_pm_ops = {
	.resume = ibmvscsi_resume
};

static struct vio_driver ibmvscsi_driver = {
	.id_table = ibmvscsi_device_table,
	.probe = ibmvscsi_probe,
	.remove = ibmvscsi_remove,
	.get_desired_dma = ibmvscsi_get_desired_dma,
	.name = "ibmvscsi",
	.pm = &ibmvscsi_pm_ops,
};

/*
 * No transport callbacks are needed; an empty template is enough to
 * register the host with the SRP transport class.
 */
static struct srp_function_template ibmvscsi_transport_functions = {
};

static int __init ibmvscsi_module_init(void)
{
	int ret;

	driver_template.can_queue = max_requests;
	/* Reserve two extra events so error recovery can always proceed */
	max_events = max_requests + 2;

	if (!firmware_has_feature(FW_FEATURE_VIO))
		return -ENODEV;

	ibmvscsi_transport_template =
		srp_attach_transport(&ibmvscsi_transport_functions);
	if (!ibmvscsi_transport_template)
		return -ENOMEM;

	ret = vio_register_driver(&ibmvscsi_driver);
	if (ret)
		srp_release_transport(ibmvscsi_transport_template);
	return ret;
}

static void __exit ibmvscsi_module_exit(void)
{
	vio_unregister_driver(&ibmvscsi_driver);
	srp_release_transport(ibmvscsi_transport_template);
}

module_init(ibmvscsi_module_init);
module_exit(ibmvscsi_module_exit);
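
/*
 * Usage sketch: assuming max_requests, init_timeout, max_id and max_channel
 * (all referenced above) are declared as module parameters earlier in this
 * file, the driver could be loaded with, e.g.:
 *
 *	modprobe ibmvscsi max_requests=100 init_timeout=10
 */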