Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 tree for the Orange Pi 5 / 5B / 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2)  * Linux driver for VMware's para-virtualized SCSI HBA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * This program is free software; you can redistribute it and/or modify it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * under the terms of the GNU General Public License as published by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * Free Software Foundation; version 2 of the License and no later version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * This program is distributed in the hope that it will be useful, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * NON INFRINGEMENT.  See the GNU General Public License for more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * You should have received a copy of the GNU General Public License
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * along with this program; if not, write to the Free Software
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * Maintained by: Jim Gill <jgill@vmware.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/workqueue.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <scsi/scsi_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include "vmw_pvscsi.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 
#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"

MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
MODULE_AUTHOR("VMware, Inc.");
MODULE_LICENSE("GPL");
MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);

/*
 * Default ring/queue geometry; the corresponding module parameters below
 * may override these at load time.
 */
#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8
#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1
#define PVSCSI_DEFAULT_QUEUE_DEPTH		254
/* Each context's hardware SG list occupies exactly one page. */
#define SGL_SIZE				PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) 
/*
 * A page-sized array of hardware scatter/gather elements. Each command
 * context owns one of these for requests that span multiple DMA segments.
 */
struct pvscsi_sg_list {
	struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 
struct pvscsi_ctx {
	/*
	 * The index of the context in cmd_map serves as the context ID for a
	 * 1-to-1 mapping of completions back to requests.
	 */
	struct scsi_cmnd	*cmd;	/* in-flight command; NULL when context is free */
	struct pvscsi_sg_list	*sgl;	/* this context's hardware SG element page */
	struct list_head	list;	/* link on adapter->cmd_pool while free */
	dma_addr_t		dataPA;	/* DMA address of a directly-mapped data buffer */
	dma_addr_t		sensePA;	/* DMA address of the sense buffer */
	dma_addr_t		sglPA;	/* DMA address of sgl; 0 when not mapped */
	struct completion	*abort_cmp;	/* signalled when an abort of cmd completes */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 
/*
 * Per-HBA driver state: the MMIO register window, the shared rings
 * (request, completion, and optional message ring) with their DMA
 * addresses, and the pool of per-command contexts.
 */
struct pvscsi_adapter {
	/* Mapped MMIO register window of the device. */
	char				*mmioBase;
	u8				rev;
	bool				use_msg;
	bool				use_req_threshold;

	/* Serializes hardware/ring access. */
	spinlock_t			hw_lock;

	/* Worker used to service the message ring outside interrupt context. */
	struct workqueue_struct		*workqueue;
	struct work_struct		work;

	/* Request ring (driver -> device) and its size/DMA address. */
	struct PVSCSIRingReqDesc	*req_ring;
	unsigned			req_pages;
	unsigned			req_depth;
	dma_addr_t			reqRingPA;

	/* Completion ring (device -> driver). */
	struct PVSCSIRingCmpDesc	*cmp_ring;
	unsigned			cmp_pages;
	dma_addr_t			cmpRingPA;

	/* Message ring (device -> driver); only used when use_msg is set. */
	struct PVSCSIRingMsgDesc	*msg_ring;
	unsigned			msg_pages;
	dma_addr_t			msgRingPA;

	/* Shared producer/consumer index page for all rings. */
	struct PVSCSIRingsState		*rings_state;
	dma_addr_t			ringStatePA;

	struct pci_dev			*dev;
	struct Scsi_Host		*host;

	/* Free-list and backing array of per-command contexts. */
	struct list_head		cmd_pool;
	struct pvscsi_ctx		*cmd_map;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) 
/* Command line parameters */
static int pvscsi_ring_pages;	/* 0 here; effective default applied elsewhere — see MODULE_PARM_DESC */
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg       = true;
static bool pvscsi_use_req_threshold = true;

/* sysfs permissions for all module parameters: owner read/write only. */
#define PVSCSI_RW (S_IRUSR | S_IWUSR)

module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
		 "[up to 16 targets],"
		 __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
		 "[for 16+ targets])");

module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");

module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");

module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");

module_param_named(use_req_threshold, pvscsi_use_req_threshold,
		   bool, PVSCSI_RW);
MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
/* PCI IDs this driver binds to: the VMware PVSCSI virtual HBA. */
static const struct pci_device_id pvscsi_pci_tbl[] = {
	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) static struct device *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) pvscsi_dev(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 	return &(adapter->dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) static struct pvscsi_ctx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	struct pvscsi_ctx *ctx, *end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 	end = &adapter->cmd_map[adapter->req_depth];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	for (ctx = adapter->cmd_map; ctx < end; ctx++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 		if (ctx->cmd == cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 			return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) static struct pvscsi_ctx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 	struct pvscsi_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 	if (list_empty(&adapter->cmd_pool))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 	ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 	ctx->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 	list_del(&ctx->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 	return ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) static void pvscsi_release_context(struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 				   struct pvscsi_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 	ctx->cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 	ctx->abort_cmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	list_add(&ctx->list, &adapter->cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193)  * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194)  * non-zero integer. ctx always points to an entry in cmd_map array, hence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195)  * the return value is always >=1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 			      const struct pvscsi_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	return ctx - adapter->cmd_map + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) static struct pvscsi_ctx *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	return &adapter->cmd_map[context - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 			     u32 offset, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) 	writel(val, adapter->mmioBase + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	return readl(adapter->mmioBase + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 				     u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	u32 intr_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	intr_bits = PVSCSI_INTR_CMPL_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	if (adapter->use_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 		intr_bits |= PVSCSI_INTR_MSG_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) 	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247) static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 				  u32 cmd, const void *desc, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) 	const u32 *ptr = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	size_t i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	len /= sizeof(*ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	for (i = 0; i < len; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 		pvscsi_reg_write(adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 				 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 			     const struct pvscsi_ctx *ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	struct PVSCSICmdDescAbortCmd cmd = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	cmd.target = ctx->cmd->device->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	cmd.context = pvscsi_map_context(adapter, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) static int scsi_is_rw(unsigned char op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	return op == READ_6  || op == WRITE_6 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	       op == READ_10 || op == WRITE_10 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	       op == READ_12 || op == WRITE_12 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 	       op == READ_16 || op == WRITE_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 			   unsigned char op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	if (scsi_is_rw(op)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 		struct PVSCSIRingsState *s = adapter->rings_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 		if (!adapter->use_req_threshold ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		    s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 			pvscsi_kick_rw_io(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 		pvscsi_process_request_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) static void ll_bus_reset(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 	dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	struct PVSCSICmdDescResetDevice cmd = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	cmd.target = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 			      &cmd, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 			     struct scatterlist *sg, unsigned count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	struct PVSCSISGElement *sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	sge = &ctx->sgl->sge[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	for (i = 0; i < count; i++, sg = sg_next(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 		sge[i].addr   = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 		sge[i].length = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) 		sge[i].flags  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 
/*
 * Map all data buffers for a command into PCI space and
 * setup the scatter/gather list if needed.
 *
 * On success fills in e->dataAddr/e->dataLen (and e->flags for SG lists)
 * and returns 0; returns -ENOMEM on any mapping failure, with everything
 * mapped so far already undone.
 */
static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			      struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			      struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	/* Zero-length transfers need no mapping at all. */
	if (bufflen == 0)
		return 0;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);

		if (segs == -ENOMEM) {
			scmd_printk(KERN_DEBUG, cmd,
				    "vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
			return -ENOMEM;
		} else if (segs > 1) {
			/*
			 * More than one DMA segment: build the hardware SG
			 * list in ctx->sgl and give the device its DMA
			 * address instead of a plain buffer address.
			 */
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = dma_map_single(&adapter->dev->dev,
					ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
			if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
				scmd_printk(KERN_ERR, cmd,
					    "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
				scsi_dma_unmap(cmd);
				/* sglPA == 0 tells the unmap path there is no SG mapping. */
				ctx->sglPA = 0;
				return -ENOMEM;
			}
			e->dataAddr = ctx->sglPA;
		} else
			/* Single segment: point the device straight at it. */
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
					     cmd->sc_data_direction);
		if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
			scmd_printk(KERN_DEBUG, cmd,
				    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
			return -ENOMEM;
		}
		e->dataAddr = ctx->dataPA;
	}

	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406)  * The device incorrectly doesn't clear the first byte of the sense
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407)  * buffer in some cases. We have to do it ourselves.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408)  * Otherwise we run into trouble when SWIOTLB is forced.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) static void pvscsi_patch_sense(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	if (cmd->sense_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		cmd->sense_buffer[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 
/*
 * Undo pvscsi_map_buffers() for a finished command: release the data
 * mapping (scatterlist plus the optional hardware SG-list page, or the
 * direct buffer) and the sense-buffer mapping.
 */
static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
				 struct pvscsi_ctx *ctx)
{
	struct scsi_cmnd *cmd;
	unsigned bufflen;

	cmd = ctx->cmd;
	bufflen = scsi_bufflen(cmd);

	if (bufflen != 0) {
		unsigned count = scsi_sg_count(cmd);

		if (count != 0) {
			scsi_dma_unmap(cmd);
			if (ctx->sglPA) {
				/* The hardware SG-list page was mapped too. */
				dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
						 SGL_SIZE, DMA_TO_DEVICE);
				ctx->sglPA = 0;
			}
		} else
			/* No scatterlist: a single direct buffer was mapped. */
			dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
					 bufflen, cmd->sc_data_direction);
	}
	/* sensePA is presumably mapped whenever sense_buffer is set — see the map path (outside this view). */
	if (cmd->sense_buffer)
		dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 			&adapter->ringStatePA, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	if (!adapter->rings_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 				 pvscsi_ring_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	adapter->req_depth = adapter->req_pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 					* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 			adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	if (!adapter->req_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 				 pvscsi_ring_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	if (!adapter->cmp_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	if (!adapter->use_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 				 pvscsi_msg_ring_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 			adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 			GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 	if (!adapter->msg_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	struct PVSCSICmdDescSetupRings cmd = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	dma_addr_t base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	cmd.reqRingNumPages = adapter->req_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	cmd.cmpRingNumPages = adapter->cmp_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	base = adapter->reqRingPA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	for (i = 0; i < adapter->req_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 		cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		base += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	base = adapter->cmpRingPA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	for (i = 0; i < adapter->cmp_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 		cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 		base += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	memset(adapter->rings_state, 0, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 	memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 			      &cmd, sizeof(cmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 	if (adapter->use_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		cmd_msg.numPages = adapter->msg_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 		base = adapter->msgRingPA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		for (i = 0; i < adapter->msg_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 			cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 			base += PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 		memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 		pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 				      &cmd_msg, sizeof(cmd_msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	if (!sdev->tagged_supported)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		qdepth = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	return scsi_change_queue_depth(sdev, qdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542)  * Pull a completion descriptor off and pass the completion back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543)  * to the SCSI mid layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 				    const struct PVSCSIRingCmpDesc *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	struct pvscsi_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	struct scsi_cmnd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	struct completion *abort_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	u32 btstat = e->hostStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	u32 sdstat = e->scsiStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	ctx = pvscsi_get_context(adapter, e->context);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	cmd = ctx->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	abort_cmp = ctx->abort_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	pvscsi_unmap_buffers(adapter, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	if (sdstat != SAM_STAT_CHECK_CONDITION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 		pvscsi_patch_sense(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	pvscsi_release_context(adapter, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	if (abort_cmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		 * The command was requested to be aborted. Just signal that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		 * the request completed and swallow the actual cmd completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		 * here. The abort handler will post a completion for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 		 * command indicating that it got successfully aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 		complete(abort_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	cmd->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	if (sdstat != SAM_STAT_GOOD &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	    (btstat == BTSTAT_SUCCESS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 			cmd->result = (DID_RESET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 			cmd->result = (DID_OK << 16) | sdstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 			if (sdstat == SAM_STAT_CHECK_CONDITION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 			    cmd->sense_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 				cmd->result |= (DRIVER_SENSE << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		switch (btstat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		case BTSTAT_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 		case BTSTAT_LINKED_COMMAND_COMPLETED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 			 * Commands like INQUIRY may transfer less data than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 			 * requested by the initiator via bufflen. Set residual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 			 * count to make upper layer aware of the actual amount
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 			 * of data returned. There are cases when controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 			 * returns zero dataLen with non zero data - do not set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 			 * residual count in that case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 			if (e->dataLen && (e->dataLen < scsi_bufflen(cmd)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 				scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 			cmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 		case BTSTAT_DATARUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		case BTSTAT_DATA_UNDERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 			/* Report residual data in underruns */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 			cmd->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		case BTSTAT_SELTIMEO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 			/* Our emulation returns this for non-connected devs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			cmd->result = (DID_BAD_TARGET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 		case BTSTAT_LUNMISMATCH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 		case BTSTAT_TAGREJECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		case BTSTAT_BADMSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 			cmd->result = (DRIVER_INVALID << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		case BTSTAT_HAHARDWARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		case BTSTAT_INVPHASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		case BTSTAT_HATIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		case BTSTAT_NORESPONSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 		case BTSTAT_DISCONNECT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		case BTSTAT_HASOFTWARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 		case BTSTAT_BUSFREE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 		case BTSTAT_SENSFAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 			cmd->result |= (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 		case BTSTAT_SENTRST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		case BTSTAT_RECVRST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 		case BTSTAT_BUSRESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 			cmd->result = (DID_RESET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 		case BTSTAT_ABORTQUEUE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 			cmd->result = (DID_BUS_BUSY << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 		case BTSTAT_SCSIPARITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			cmd->result = (DID_PARITY << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 			cmd->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 			scmd_printk(KERN_DEBUG, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 				    "Unknown completion status: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 				    btstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	dev_dbg(&cmd->device->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 		"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 		cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661)  * barrier usage : Since the PVSCSI device is emulated, there could be cases
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662)  * where we may want to serialize some accesses between the driver and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663)  * emulation layer. We use compiler barriers instead of the more expensive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664)  * memory barriers because PVSCSI is only supported on X86 which has strong
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665)  * memory access ordering.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	struct PVSCSIRingsState *s = adapter->rings_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 	u32 cmp_entries = s->cmpNumEntriesLog2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	while (s->cmpConsIdx != s->cmpProdIdx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 						      MASK(cmp_entries));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 		 * This barrier() ensures that *e is not dereferenced while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 		 * the device emulation still writes data into the slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		 * Since the device emulation advances s->cmpProdIdx only after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 		 * updating the slot we want to check it first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 		pvscsi_complete_request(adapter, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		 * This barrier() ensures that compiler doesn't reorder write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 		 * to s->cmpConsIdx before the read of (*e) inside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 		 * pvscsi_complete_request. Otherwise, device emulation may
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 		 * overwrite *e before we had a chance to read it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		s->cmpConsIdx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696)  * Translate a Linux SCSI request into a request ring entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	struct PVSCSIRingsState *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	struct PVSCSIRingReqDesc *e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	u32 req_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	s = adapter->rings_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	sdev = cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	req_entries = s->reqNumEntriesLog2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	 * If this condition holds, we might have room on the request ring, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	 * we might not have room on the completion ring for the response.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	 * However, we have already ruled out this possibility - we would not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	 * have successfully allocated a context if it were true, since we only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	 * have one context per request entry.  Check for it anyway, since it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 	 * would be a serious bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 			    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			    s->reqProdIdx, s->cmpConsIdx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	e->bus    = sdev->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	e->target = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	memset(e->lun, 0, sizeof(e->lun));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	e->lun[1] = sdev->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	if (cmd->sense_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 		ctx->sensePA = dma_map_single(&adapter->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 				cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 				DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			scmd_printk(KERN_DEBUG, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 			ctx->sensePA = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		e->senseAddr = ctx->sensePA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		e->senseLen = SCSI_SENSE_BUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		e->senseLen  = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		e->senseAddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	e->cdbLen   = cmd->cmd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	e->vcpuHint = smp_processor_id();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	memcpy(e->cdb, cmd->cmnd, e->cdbLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	e->tag = SIMPLE_QUEUE_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	else if (cmd->sc_data_direction == DMA_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		e->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		if (cmd->sense_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 					 SCSI_SENSE_BUFFERSIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 					 DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			ctx->sensePA = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	e->context = pvscsi_map_context(adapter, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	s->reqProdIdx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct Scsi_Host *host = cmd->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	struct pvscsi_adapter *adapter = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	struct pvscsi_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	unsigned char op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	spin_lock_irqsave(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	ctx = pvscsi_acquire_context(adapter, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		if (ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			pvscsi_release_context(adapter, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	cmd->scsi_done = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	op = cmd->cmnd[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	dev_dbg(&cmd->device->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	pvscsi_kick_io(adapter, op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) static DEF_SCSI_QCMD(pvscsi_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static int pvscsi_abort(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	struct pvscsi_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	int result = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	DECLARE_COMPLETION_ONSTACK(abort_cmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	int done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		    adapter->host->host_no, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	spin_lock_irqsave(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	 * Poll the completion ring first - we might be trying to abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	 * a command that is waiting to be dispatched in the completion ring.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	pvscsi_process_completion_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	 * If there is no context for the command, it either already succeeded
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	 * or else was never properly issued.  Not our problem.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	ctx = pvscsi_find_context(adapter, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (!ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 * Mark that the command has been requested to be aborted and issue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 * the abort.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	ctx->abort_cmp = &abort_cmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	pvscsi_abort_cmd(adapter, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	/* Wait for 2 secs for the completion. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	spin_lock_irqsave(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (!done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		 * Failed to abort the command, unmark the fact that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		 * was requested to be aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		ctx->abort_cmp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		result = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		scmd_printk(KERN_DEBUG, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			    "Failed to get completion for aborted cmd %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			    cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	 * Successfully aborted the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	cmd->result = (DID_ABORT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882)  * Abort all outstanding requests.  This is only safe to use if the completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883)  * ring will never be walked again or the device has been reset, because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884)  * destroys the 1-1 mapping between context field passed to emulation and our
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885)  * request structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	for (i = 0; i < adapter->req_depth; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		struct scsi_cmnd *cmd = ctx->cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		if (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			scmd_printk(KERN_ERR, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 				    "Forced reset on cmd %p\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			pvscsi_unmap_buffers(adapter, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			pvscsi_patch_sense(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			pvscsi_release_context(adapter, ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 			cmd->result = (DID_RESET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) static int pvscsi_host_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	struct Scsi_Host *host = cmd->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct pvscsi_adapter *adapter = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	bool use_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	spin_lock_irqsave(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	use_msg = adapter->use_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	if (use_msg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		adapter->use_msg = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 		 * Now that we know that the ISR won't add more work on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		 * workqueue we can safely flush any outstanding work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		flush_workqueue(adapter->workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		spin_lock_irqsave(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	 * We're going to tear down the entire ring structure and set it back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	 * up, so stalling new requests until all completions are flushed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	 * the rings are back in place.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	pvscsi_process_request_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	ll_adapter_reset(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	 * Now process any completions.  Note we do this AFTER adapter reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	 * which is strange, but stops races where completions get posted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * between processing the ring and issuing the reset.  The backend will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 * not touch the ring memory after reset, so the immediately pre-reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	 * completion ring state is still valid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	pvscsi_process_completion_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	pvscsi_reset_all(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	adapter->use_msg = use_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	pvscsi_setup_all_rings(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	pvscsi_unmask_intr(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	struct Scsi_Host *host = cmd->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	struct pvscsi_adapter *adapter = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 * We don't want to queue new requests for this bus after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 * flushing all pending requests to emulation, since new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 * requests could then sneak in during this bus reset phase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 * so take the lock now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	spin_lock_irqsave(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	pvscsi_process_request_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	ll_bus_reset(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	pvscsi_process_completion_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) static int pvscsi_device_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	struct Scsi_Host *host = cmd->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	struct pvscsi_adapter *adapter = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		    host->host_no, cmd->device->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	 * We don't want to queue new requests for this device after flushing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 * all pending requests to emulation, since new requests could then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	 * sneak in during this device reset phase, so take the lock now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	spin_lock_irqsave(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	pvscsi_process_request_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	ll_device_reset(adapter, cmd->device->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	pvscsi_process_completion_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) static struct scsi_host_template pvscsi_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) static const char *pvscsi_info(struct Scsi_Host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	struct pvscsi_adapter *adapter = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	static char buf[256];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		pvscsi_template.cmd_per_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	return buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) static struct scsi_host_template pvscsi_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	.module				= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	.name				= "VMware PVSCSI Host Adapter",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	.proc_name			= "vmw_pvscsi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	.info				= pvscsi_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	.queuecommand			= pvscsi_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	.this_id			= -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	.sg_tablesize			= PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	.dma_boundary			= UINT_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	.max_sectors			= 0xffff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	.change_queue_depth		= pvscsi_change_queue_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	.eh_abort_handler		= pvscsi_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	.eh_device_reset_handler	= pvscsi_device_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	.eh_bus_reset_handler		= pvscsi_bus_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	.eh_host_reset_handler		= pvscsi_host_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 			       const struct PVSCSIRingMsgDesc *e)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	struct PVSCSIRingsState *s = adapter->rings_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	struct Scsi_Host *host = adapter->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	if (e->type == PVSCSI_MSG_DEV_ADDED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		struct PVSCSIMsgDescDevStatusChanged *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		       desc->bus, desc->target, desc->lun[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		if (!scsi_host_get(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		sdev = scsi_device_lookup(host, desc->bus, desc->target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 					  desc->lun[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 		if (sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			printk(KERN_INFO "vmw_pvscsi: device already exists\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 			scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			scsi_add_device(adapter->host, desc->bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 					desc->target, desc->lun[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		struct PVSCSIMsgDescDevStatusChanged *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		       desc->bus, desc->target, desc->lun[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		if (!scsi_host_get(host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		sdev = scsi_device_lookup(host, desc->bus, desc->target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 					  desc->lun[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		if (sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 			       "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			       desc->bus, desc->target, desc->lun[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	struct PVSCSIRingsState *s = adapter->rings_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	return s->msgProdIdx != s->msgConsIdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	struct PVSCSIRingsState *s = adapter->rings_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	u32 msg_entries = s->msgNumEntriesLog2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	while (pvscsi_msg_pending(adapter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 						      MASK(msg_entries));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		pvscsi_process_msg(adapter, e);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		barrier();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		s->msgConsIdx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) static void pvscsi_msg_workqueue_handler(struct work_struct *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	struct pvscsi_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	adapter = container_of(data, struct pvscsi_adapter, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	pvscsi_process_msg_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	char name[32];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (!pvscsi_use_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			 PVSCSI_CMD_SETUP_MSG_RING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	snprintf(name, sizeof(name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		 "vmw_pvscsi_wq_%u", adapter->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	adapter->workqueue = create_singlethread_workqueue(name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	if (!adapter->workqueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 				      bool enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	if (!pvscsi_use_req_threshold)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (val == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		cmd_msg.enable = enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		       "vmw_pvscsi: %sabling reqCallThreshold\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			enable ? "en" : "dis");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		pvscsi_write_cmd_desc(adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				      PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				      &cmd_msg, sizeof(cmd_msg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		return pvscsi_reg_read(adapter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 				       PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static irqreturn_t pvscsi_isr(int irq, void *devp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	struct pvscsi_adapter *adapter = devp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	spin_lock_irqsave(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	pvscsi_process_completion_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	if (adapter->use_msg && pvscsi_msg_pending(adapter))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		queue_work(adapter->workqueue, &adapter->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	spin_unlock_irqrestore(&adapter->hw_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) static irqreturn_t pvscsi_shared_isr(int irq, void *devp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	struct pvscsi_adapter *adapter = devp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	u32 val = pvscsi_read_intr_status(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (!(val & PVSCSI_INTR_ALL_SUPPORTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	pvscsi_write_intr_status(devp, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	return pvscsi_isr(irq, devp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	struct pvscsi_ctx *ctx = adapter->cmd_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	unsigned i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	for (i = 0; i < adapter->req_depth; ++i, ++ctx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	free_irq(pci_irq_vector(adapter->dev, 0), adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	pci_free_irq_vectors(adapter->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	if (adapter->workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		destroy_workqueue(adapter->workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	if (adapter->mmioBase)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		pci_iounmap(adapter->dev, adapter->mmioBase);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	pci_release_regions(adapter->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (adapter->cmd_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		pvscsi_free_sgls(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		kfree(adapter->cmd_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	if (adapter->rings_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 				    adapter->rings_state, adapter->ringStatePA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	if (adapter->req_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		dma_free_coherent(&adapter->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 				    adapter->req_pages * PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 				    adapter->req_ring, adapter->reqRingPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (adapter->cmp_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		dma_free_coherent(&adapter->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 				    adapter->cmp_pages * PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 				    adapter->cmp_ring, adapter->cmpRingPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (adapter->msg_ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		dma_free_coherent(&adapter->dev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 				    adapter->msg_pages * PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 				    adapter->msg_ring, adapter->msgRingPA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  * Allocate scatter gather lists.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  * These are statically allocated.  Trying to be clever was not worth it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  * Dynamic allocation can fail, and we can't go deep into the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  * allocator, since we're a SCSI driver, and trying too hard to allocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  * memory might generate disk I/O.  We also don't want to fail disk I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)  * in that case because we can't get an allocation - the I/O could be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)  * trying to swap out data to free memory.  Since that is pathological,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272)  * just use a statically allocated scatter list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	struct pvscsi_ctx *ctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	ctx = adapter->cmd_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 						    get_order(SGL_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		ctx->sglPA = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		if (!ctx->sgl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 			for (; i >= 0; --i, --ctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 				free_pages((unsigned long)ctx->sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 					   get_order(SGL_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 				ctx->sgl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302)  * Query the device, fetch the config info and return the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)  * maximum number of targets on the adapter. In case of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304)  * failure due to any reason return default i.e. 16.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	struct PVSCSICmdDescConfigCmd cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	struct PVSCSIConfigPageHeader *header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	dma_addr_t configPagePA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	void *config_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	u32 numPhys = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	dev = pvscsi_dev(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			&configPagePA, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	if (!config_page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	BUG_ON(configPagePA & ~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	/* Fetch config info from the device. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	cmd.cmpAddr = configPagePA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	cmd._pad = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	 * Mark the completion page header with error values. If the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	 * completes the command successfully, it sets the status values to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	 * indicate success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	header = config_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	memset(header, 0, sizeof *header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	header->hostStatus = BTSTAT_INVPARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	header->scsiStatus = SDSTAT_CHECK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	if (header->hostStatus == BTSTAT_SUCCESS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	    header->scsiStatus == SDSTAT_GOOD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		struct PVSCSIConfigPageController *config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		config = config_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		numPhys = config->numPhys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			 header->hostStatus, header->scsiStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 			  configPagePA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	return numPhys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	struct pvscsi_adapter *adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	struct pvscsi_adapter adapter_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	struct Scsi_Host *host = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	u32 max_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	error = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	if (pci_enable_device(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	} else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		goto out_disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	 * Let's use a temp pvscsi_adapter struct until we find the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	 * targets on the adapter, after that we will switch to the real
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	 * allocated struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	adapter = &adapter_temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	memset(adapter, 0, sizeof(*adapter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	adapter->dev  = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	adapter->rev = pdev->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (pci_request_regions(pdev, "vmw_pvscsi")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		goto out_disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (i == DEVICE_COUNT_RESOURCE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		goto out_release_resources_and_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	if (!adapter->mmioBase) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		       i, PVSCSI_MEM_SPACE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		goto out_release_resources_and_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	 * Ask the device for max number of targets before deciding the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	 * default pvscsi_ring_pages value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	max_id = pvscsi_get_max_targets(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	if (pvscsi_ring_pages == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		 * Set the right default value. Up to 16 it is 8, above it is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		 * max.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		pvscsi_ring_pages = (max_id > 16) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	printk(KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	       "vmw_pvscsi: setting ring_pages to %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	       pvscsi_ring_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	pvscsi_template.can_queue =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	pvscsi_template.cmd_per_lun =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		goto out_release_resources_and_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	 * Let's use the real pvscsi_adapter struct here onwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	adapter = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	memset(adapter, 0, sizeof(*adapter));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	adapter->dev  = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	adapter->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	 * Copy back what we already have to the allocated adapter struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	adapter->rev = adapter_temp.rev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	adapter->mmioBase = adapter_temp.mmioBase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	spin_lock_init(&adapter->hw_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	host->max_channel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	host->max_lun     = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	host->max_cmd_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	host->max_id      = max_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	pci_set_drvdata(pdev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	ll_adapter_reset(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	error = pvscsi_allocate_rings(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		goto out_release_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	 * From this point on we should reset the adapter if anything goes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	 * wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	pvscsi_setup_all_rings(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	adapter->cmd_map = kcalloc(adapter->req_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	if (!adapter->cmd_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		error = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		goto out_reset_adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	INIT_LIST_HEAD(&adapter->cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	for (i = 0; i < adapter->req_depth; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		list_add(&ctx->list, &adapter->cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	error = pvscsi_allocate_sg(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		goto out_reset_adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (pvscsi_disable_msix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		irq_flag &= ~PCI_IRQ_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	if (pvscsi_disable_msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		irq_flag &= ~PCI_IRQ_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	if (error < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		goto out_reset_adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	       adapter->use_req_threshold ? "en" : "dis");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		printk(KERN_INFO "vmw_pvscsi: using MSI%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 			adapter->dev->msix_enabled ? "-X" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 				0, "vmw_pvscsi", adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 				IRQF_SHARED, "vmw_pvscsi", adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		goto out_reset_adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	error = scsi_add_host(host, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		printk(KERN_ERR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		goto out_reset_adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		 adapter->rev, host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	pvscsi_unmask_intr(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	scsi_scan_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) out_reset_adapter:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	ll_adapter_reset(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) out_release_resources:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	pvscsi_shutdown_intr(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	pvscsi_release_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) out_disable_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) out_release_resources_and_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	pvscsi_shutdown_intr(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	pvscsi_release_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	goto out_disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	pvscsi_mask_intr(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	if (adapter->workqueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		flush_workqueue(adapter->workqueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	pvscsi_shutdown_intr(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	pvscsi_process_request_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	pvscsi_process_completion_ring(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	ll_adapter_reset(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) static void pvscsi_shutdown(struct pci_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	struct Scsi_Host *host = pci_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	struct pvscsi_adapter *adapter = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	__pvscsi_shutdown(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) static void pvscsi_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	struct Scsi_Host *host = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	struct pvscsi_adapter *adapter = shost_priv(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	scsi_remove_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	__pvscsi_shutdown(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	pvscsi_release_resources(adapter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) static struct pci_driver pvscsi_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	.name		= "vmw_pvscsi",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	.id_table	= pvscsi_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	.probe		= pvscsi_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	.remove		= pvscsi_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	.shutdown       = pvscsi_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) static int __init pvscsi_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	pr_info("%s - version %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	return pci_register_driver(&pvscsi_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) static void __exit pvscsi_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	pci_unregister_driver(&pvscsi_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) module_init(pvscsi_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) module_exit(pvscsi_exit);