Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

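/*
 * Logical drives are presented on a dedicated SCSI channel above the
 * physical drive channels; the helper below returns that channel so the
 * rest of the driver can tell logical drives and physical devices apart.
 */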
static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

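/*
 * Every command may need a scatter/gather list (sg_tablesize entries of
 * struct myrb_sge) carved out of sg_pool and, for SCSI pass-through to
 * physical devices, a Direct CDB block from dcdb_pool.  The per-host
 * single-threaded workqueue created here runs the periodic monitor.
 */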
/**
 * myrb_create_mempools - allocates auxiliary data structures
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
				       sizeof(struct myrb_dcdb),
				       sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/**
 * myrb_destroy_mempools - tears down the memory pools for the controller
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/**
 * myrb_reset_cmd - reset command block
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

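/*
 * Commands are posted into a ring of memory mailboxes (first_cmd_mbox ..
 * last_cmd_mbox).  The controller is only poked through get_cmd_mbox()
 * when one of the two most recently posted mailboxes has already been
 * picked up (words[0] == 0), i.e. when the controller may have stopped
 * scanning the ring; this mirrors the dual-mode queueing of the legacy
 * DAC960 driver.  Callers hold cb->queue_lock (see myrb_exec_cmd()).
 */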
/**
 * myrb_qcmd - queues command block for execution
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

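/*
 * Synchronous command execution: the command block is queued under
 * queue_lock and the caller sleeps on an on-stack completion that is
 * signalled from the command completion path.  Process context only,
 * hence the WARN_ON(in_interrupt()) below.
 */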
/**
 * myrb_exec_cmd - executes command block and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	WARN_ON(in_interrupt());
	wait_for_completion(&cmpl);
	return cmd_blk->status;
}

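/*
 * myrb_exec_type3() and myrb_exec_type3D() both borrow the controller's
 * direct command block (dcmd_blk), so dcmd_mutex serializes them and only
 * one directly issued command is outstanding at a time.  The monitor-only
 * helpers further down use mcmd_blk instead.
 */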
/**
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/**
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

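/*
 * Controller events are reported as SCSI sense data: sense key
 * VENDOR_SPECIFIC with ASC 0x80 encodes a "drive killed" event whose ASCQ
 * indexes myrb_event_msg[] above; anything else is logged as a raw
 * key/asc/ascq triple.
 */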
/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);

	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

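/*
 * The controller DMAs its per-device error counter table straight into
 * cb->err_table; a stack snapshot of the previous table is kept so that
 * only devices whose parity/soft/hard/misc counters changed are logged.
 */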
/**
 * myrb_get_errtable - retrieves the error table from the controller
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

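/*
 * Newly reported logical drives (other than offline ones) are hot-added
 * via scsi_add_device(); for known drives only state and write-back
 * cache transitions are logged before the cached ldev info is refreshed.
 */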
/**
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/**
 * myrb_get_rbld_progress - get rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

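/*
 * Rebuild progress is polled by the monitor.  A poll that reports no
 * rebuild in progress immediately after one that did is mapped to
 * MYRB_STATUS_RBLD_SUCCESS so completion gets reported exactly once;
 * the last status is cached in cb->last_rbld_status.
 */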
/**
 * myrb_update_rbld_progress - updates the rebuild status
 *
 * Updates the rebuild status for the attached logical devices.
 *
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				     "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

/**
 * myrb_get_cc_progress - retrieve the consistency check progress
 *
 * Executes a type 3 command and logs the consistency check progress
 * of the affected logical drive.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

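/*
 * Background initialisation status is polled with a type 3B command.
 * Progress is only logged when it has advanced since the previous poll,
 * and the cached cb->bgi_status lets completion or abort be reported a
 * single time.
 */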
/**
 * myrb_bgi_control - updates background initialisation status
 *
 * Executes a type 3B command and updates the background initialisation status
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				 "Background Initialization in Progress: %d%% completed\n",
				 (100 * (bgi->blocks_done >> 7))
				 / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

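/*
 * The enquiry result is compared against the previous snapshot; any
 * differences set the need_* flags (error log, logical drive info,
 * rebuild, consistency check, BGI status) that tell the monitor which
 * follow-up commands to issue on its next pass.
 */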
/**
 * myrb_hba_enquiry - updates the controller status
 *
 * Executes a DAC_V1_Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777)  * myrb_set_pdev_state - sets the device state for a physical device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779)  * Return: command status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		struct scsi_device *sdev, enum myrb_devstate state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	mutex_lock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	mbox->type3D.id = MYRB_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	mbox->type3D.channel = sdev->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	mbox->type3D.target = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	mbox->type3D.state = state & 0x1F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	status = myrb_exec_cmd(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	mutex_unlock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801)  * myrb_enable_mmio - enables the Memory Mailbox Interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803)  * PD and P controller types have no memory mailbox, but still need the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804)  * other DMA-mapped memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  * Return: true on success, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	void __iomem *base = cb->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	struct pci_dev *pdev = cb->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	size_t err_table_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	size_t ldev_info_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 	union myrb_cmd_mbox *cmd_mbox_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	struct myrb_stat_mbox *stat_mbox_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	union myrb_cmd_mbox mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
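	/*
	 * The V1 mailbox interface only carries 32-bit bus addresses, so a
	 * 32-bit DMA mask has to be in place before any of the coherent
	 * allocations that follow.
	 */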
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 		dev_err(&pdev->dev, "DMA mask out of range\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	cb->enquiry = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 					 sizeof(struct myrb_enquiry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 					 &cb->enquiry_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (!cb->enquiry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	err_table_size = sizeof(struct myrb_error_entry) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 					   &cb->err_table_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	if (!cb->err_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 					       &cb->ldev_info_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	if (!cb->ldev_info_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		return false;
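	/*
	 * The enquiry, error table and logical drive info buffers above stay
	 * allocated for the lifetime of the HBA; the periodic monitor simply
	 * re-issues the corresponding commands against these fixed DMA
	 * addresses instead of allocating per request.
	 */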
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 * Skip mailbox initialization for PD and P Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (!mmio_init_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	/* These are the base addresses for the command memory mailbox array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 						cb->cmd_mbox_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 						&cb->cmd_mbox_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 						GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	if (!cb->first_cmd_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
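	/*
	 * The command mailboxes are treated as a ring: next_cmd_mbox is the
	 * slot the driver fills next, and prev_cmd_mbox1/prev_cmd_mbox2
	 * remember the two most recently written slots; the submission path
	 * appears to ring the hardware doorbell only when one of those two
	 * has already been consumed by the controller.
	 */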
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	cmd_mbox_mem = cb->first_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	cb->last_cmd_mbox = cmd_mbox_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	cb->next_cmd_mbox = cb->first_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	/* These are the base addresses for the status memory mailbox array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	    sizeof(struct myrb_stat_mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 						 cb->stat_mbox_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 						 &cb->stat_mbox_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 						 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if (!cb->first_stat_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	stat_mbox_mem = cb->first_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	cb->last_stat_mbox = stat_mbox_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	cb->next_stat_mbox = cb->first_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	/* Enable the Memory Mailbox Interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	cb->dual_mode_interface = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	mbox.typeX.opcode = 0x2B;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	mbox.typeX.id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	mbox.typeX.opcode2 = 0x14;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
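	/*
	 * Opcode 0x2B with opcode2 0x14 asks the controller to enable the
	 * dual-mode memory mailbox interface at the addresses filled in
	 * above; if the controller rejects it, the fallback below retries
	 * with opcode2 0x10, which appears to select single mode.
	 */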
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	status = mmio_init_fn(pdev, base, &mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	if (status != MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		cb->dual_mode_interface = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		mbox.typeX.opcode2 = 0x10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		status = mmio_init_fn(pdev, base, &mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		if (status != MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 				"Failed to enable mailbox, status %02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 				status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 			return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * myrb_get_hba_config - reads the configuration information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * Reads the configuration information from the controller and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  * initializes the controller structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * Return: 0 on success, errno otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) static int myrb_get_hba_config(struct myrb_hba *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	struct myrb_enquiry2 *enquiry2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	dma_addr_t enquiry2_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	struct myrb_config2 *config2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	dma_addr_t config2_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	struct Scsi_Host *shost = cb->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	struct pci_dev *pdev = cb->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	int pchan_max = 0, pchan_cur = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	int ret = -ENODEV, memsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 				      &enquiry2_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	if (!enquiry2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		shost_printk(KERN_ERR, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 			     "Failed to allocate V1 enquiry2 memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				     &config2_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (!config2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		shost_printk(KERN_ERR, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			     "Failed to allocate V1 config2 memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 				  enquiry2, enquiry2_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	mutex_lock(&cb->dma_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	status = myrb_hba_enquiry(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	mutex_unlock(&cb->dma_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (status != MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		shost_printk(KERN_WARNING, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			     "Failed to issue V1 Enquiry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	if (status != MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		shost_printk(KERN_WARNING, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			     "Failed to issue V1 Enquiry2\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	if (status != MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		shost_printk(KERN_WARNING, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			     "Failed to issue ReadConfig2\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	status = myrb_get_ldev_info(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (status != MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		shost_printk(KERN_WARNING, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			     "Failed to get logical drive information\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 * Initialize the Controller Model Name and Full Model Name fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	switch (enquiry2->hw.sub_model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	case DAC960_V1_P_PD_PU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			strcpy(cb->model_name, "DAC960PU");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			strcpy(cb->model_name, "DAC960PD");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	case DAC960_V1_PL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		strcpy(cb->model_name, "DAC960PL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	case DAC960_V1_PG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		strcpy(cb->model_name, "DAC960PG");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	case DAC960_V1_PJ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		strcpy(cb->model_name, "DAC960PJ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	case DAC960_V1_PR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		strcpy(cb->model_name, "DAC960PR");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	case DAC960_V1_PT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		strcpy(cb->model_name, "DAC960PT");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	case DAC960_V1_PTL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		strcpy(cb->model_name, "DAC960PTL0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	case DAC960_V1_PRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		strcpy(cb->model_name, "DAC960PRL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	case DAC960_V1_PTL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		strcpy(cb->model_name, "DAC960PTL1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	case DAC960_V1_1164P:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		strcpy(cb->model_name, "eXtremeRAID 1100");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		shost_printk(KERN_WARNING, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			     "Unknown Model %X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			     enquiry2->hw.sub_model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	 * Initialize the Controller Firmware Version field and verify that it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	 * is a supported firmware version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	 * The supported firmware versions are:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * DAC1164P		    5.06 and above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	 * DAC960PU/PD/PL	    3.51 and above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	 * DAC960PU/PD/PL/P	    2.73 and above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) #if defined(CONFIG_ALPHA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 	 * DEC Alpha machines were often equipped with DAC960 cards that were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	 * the last custom FW revision to be released by DEC for these older
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	 * controllers, appears to work quite well with this driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	 * Cards tested successfully were several versions each of the PD and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	 * back of the board, of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	 *         or D040349 (3-channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 *         or D040397 (3-channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) # define FIRMWARE_27X	"2.70"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) # define FIRMWARE_27X	"2.73"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	if (enquiry2->fw.major_version == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		enquiry2->fw.firmware_type = '0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		enquiry2->fw.turn_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	snprintf(cb->fw_version, sizeof(cb->fw_version),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		"%u.%02u-%c-%02u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		enquiry2->fw.major_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		enquiry2->fw.minor_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		enquiry2->fw.firmware_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		enquiry2->fw.turn_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	if (!((enquiry2->fw.major_version == 5 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	       enquiry2->fw.minor_version >= 6) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	      (enquiry2->fw.major_version == 4 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	       enquiry2->fw.minor_version >= 6) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	      (enquiry2->fw.major_version == 3 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	       enquiry2->fw.minor_version >= 51) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	      (enquiry2->fw.major_version == 2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		shost_printk(KERN_WARNING, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			"Firmware Version '%s' unsupported\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 			cb->fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	 * Enclosure Management Enabled fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	switch (enquiry2->hw.model) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	case MYRB_5_CHANNEL_BOARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		pchan_max = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	case MYRB_3_CHANNEL_BOARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	case MYRB_3_CHANNEL_ASIC_DAC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		pchan_max = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	case MYRB_2_CHANNEL_BOARD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		pchan_max = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		pchan_max = enquiry2->cfg_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	pchan_cur = enquiry2->cur_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		cb->bus_width = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		cb->bus_width = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		cb->bus_width = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	cb->ldev_block_size = enquiry2->ldev_block_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	shost->max_channel = pchan_cur;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	shost->max_id = enquiry2->max_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	memsize = enquiry2->mem_size >> 20;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	 * Initialize the Controller Queue Depth, Driver Queue Depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	 * Logical Drive Count, Maximum Blocks per Command, Controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	 * The Driver Queue Depth must be at most one less than the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	 * Controller Queue Depth to allow for an automatic drive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	 * rebuild operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	shost->can_queue = cb->enquiry->max_tcq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	if (shost->can_queue < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		shost->can_queue = enquiry2->max_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	if (shost->can_queue < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		/* Play safe and disable TCQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		shost->can_queue = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	shost->max_sectors = enquiry2->max_sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	shost->sg_tablesize = enquiry2->max_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		>> (10 - MYRB_BLKSIZE_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		>> (10 - MYRB_BLKSIZE_BITS);
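	/*
	 * blocks_per_stripe and blocks_per_cacheline count 512-byte blocks
	 * (assuming MYRB_BLKSIZE_BITS is 9), so the shift by 10 - 9 = 1
	 * converts them into the kilobyte values reported in the "Stripe
	 * Size" message below; e.g. 128 blocks per stripe with a block
	 * factor of 1 gives 128 >> 1 = 64, i.e. a 64KB stripe.
	 */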
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	/* Assume 255/63 translation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	cb->ldev_geom_heads = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	cb->ldev_geom_sectors = 63;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	if (config2->drive_geometry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		cb->ldev_geom_heads = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		cb->ldev_geom_sectors = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	 * Initialize the Background Initialization Status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	if ((cb->fw_version[0] == '4' &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	     strcmp(cb->fw_version, "4.08") >= 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	    (cb->fw_version[0] == '5' &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	     strcmp(cb->fw_version, "5.08") >= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		cb->bgi_status_supported = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		myrb_bgi_control(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		"Configuring %s PCI RAID Controller\n", cb->model_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		"  Firmware Version: %s, Memory Size: %dMB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		cb->fw_version, memsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	if (cb->io_addr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			(unsigned long)cb->pci_addr, cb->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			cb->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		cb->host->can_queue, cb->host->max_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		     cb->host->can_queue, cb->host->sg_tablesize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		     MYRB_SCATTER_GATHER_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		     cb->stripe_size, cb->segment_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		     cb->safte_enabled ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		     "  SAF-TE Enclosure Management Enabled" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 		     "  Physical: %d/%d channels %d/%d/%d devices\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		     cb->host->max_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	shost_printk(KERN_INFO, cb->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		     "  Logical: 1/1 channels, %d/%d disks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			  enquiry2, enquiry2_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			  config2, config2_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)  * myrb_unmap - unmaps controller structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) static void myrb_unmap(struct myrb_hba *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	if (cb->ldev_info_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			MYRB_MAX_LDEVS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 				  cb->ldev_info_buf, cb->ldev_info_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		cb->ldev_info_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	if (cb->err_table) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		size_t err_table_size = sizeof(struct myrb_error_entry) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		dma_free_coherent(&cb->pdev->dev, err_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 				  cb->err_table, cb->err_table_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		cb->err_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	if (cb->enquiry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 				  cb->enquiry, cb->enquiry_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		cb->enquiry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	if (cb->first_stat_mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 				  cb->first_stat_mbox, cb->stat_mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		cb->first_stat_mbox = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	if (cb->first_cmd_mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		cb->first_cmd_mbox = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  * myrb_cleanup - cleanup controller structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) static void myrb_cleanup(struct myrb_hba *cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	struct pci_dev *pdev = cb->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	/* Free the memory mailbox, status, and related structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	myrb_unmap(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	if (cb->mmio_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		cb->disable_intr(cb->io_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		iounmap(cb->mmio_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	if (cb->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		free_irq(cb->irq, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (cb->io_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		release_region(cb->io_addr, 0x80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	pci_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	scsi_host_put(cb->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int myrb_host_reset(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	struct Scsi_Host *shost = scmd->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	struct myrb_hba *cb = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	cb->reset(cb->io_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	struct myrb_hba *cb = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	struct myrb_dcdb *dcdb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 	dma_addr_t dcdb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	struct scsi_device *sdev = scmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	struct scatterlist *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	int nsge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	myrb_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	if (!dcdb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	nsge = scsi_dma_map(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	if (nsge > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		scmd->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	mbox->type3.opcode = MYRB_CMD_DCDB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	mbox->type3.id = scmd->request->tag + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	mbox->type3.addr = dcdb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	dcdb->channel = sdev->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	dcdb->target = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	switch (scmd->sc_data_direction) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	case DMA_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	dcdb->early_status = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	if (scmd->request->timeout <= 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	else if (scmd->request->timeout <= 60)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	else if (scmd->request->timeout <= 600)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	dcdb->no_autosense = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	dcdb->allow_disconnect = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	sgl = scsi_sglist(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	dcdb->dma_addr = sg_dma_address(sgl);
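	/*
	 * Judging by the field names, the DCDB transfer length is a 20-bit
	 * quantity split into a 16-bit low word plus a 4-bit high nibble, so
	 * a single-segment passthrough transfer can describe a bit under 1MB.
	 */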
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	if (sg_dma_len(sgl) > USHRT_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		dcdb->xfer_len_lo = sg_dma_len(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		dcdb->xfer_len_hi4 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	dcdb->cdb_len = scmd->cmd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	dcdb->sense_len = sizeof(dcdb->sense);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	spin_lock_irqsave(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	cb->qcmd(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	spin_unlock_irqrestore(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static void myrb_inquiry(struct myrb_hba *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	unsigned char inq[36] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		0x20, 0x20, 0x20, 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	};
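	/*
	 * Bytes 8..15 of this standard INQUIRY template are the ASCII vendor
	 * identification: 0x4d 0x59 0x4c 0x45 0x58 plus trailing spaces
	 * spells "MYLEX   ". The product id (bytes 16..31) and revision
	 * (bytes 32..35) are patched in from model_name and fw_version below.
	 */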
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	if (cb->bus_width > 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 		inq[7] |= 1 << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (cb->bus_width > 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		inq[7] |= 1 << 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	memcpy(&inq[16], cb->model_name, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	memcpy(&inq[32], cb->fw_version, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	memcpy(&inq[33], &cb->fw_version[2], 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	memcpy(&inq[35], &cb->fw_version[7], 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		struct myrb_ldev_info *ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	unsigned char modes[32], *mode_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	bool dbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	size_t mode_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	if (dbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 		mode_len = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		mode_pg = &modes[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		mode_len = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		mode_pg = &modes[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	memset(modes, 0, sizeof(modes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	modes[0] = mode_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	if (!dbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		unsigned char *block_desc = &modes[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		modes[3] = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		put_unaligned_be32(ldev_info->size, &block_desc[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	}
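	/*
	 * mode_pg points at what appears to be the SCSI caching mode page
	 * (page code 0x08, length 0x12): bit 2 of byte 2 (WCE) mirrors the
	 * logical drive's write-back setting, and bytes 14-15 carry the
	 * cache segment size. Note that the block length written at offset 5
	 * of the block descriptor above is defined as a 3-byte field, so the
	 * 32-bit store spills one byte into mode_pg[0], which is immediately
	 * overwritten here.
	 */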
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	mode_pg[0] = 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	mode_pg[1] = 0x12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	if (ldev_info->wb_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		mode_pg[2] |= 0x04;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (cb->segment_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		mode_pg[2] |= 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) static void myrb_request_sense(struct myrb_hba *cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 				NO_SENSE, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 				 SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		struct myrb_ldev_info *ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	unsigned char data[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	dev_dbg(&scmd->device->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 		"Capacity %u, blocksize %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		ldev_info->size, cb->ldev_block_size);
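	/*
	 * READ CAPACITY(10) reports the address of the last logical block,
	 * hence size - 1; both fields are returned big-endian.
	 */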
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	put_unaligned_be32(ldev_info->size - 1, &data[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	put_unaligned_be32(cb->ldev_block_size, &data[4]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	scsi_sg_copy_from_buffer(scmd, data, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	struct myrb_hba *cb = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	struct myrb_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	struct scsi_device *sdev = scmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct scatterlist *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	u64 lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	u32 block_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	int nsge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	if (ldev_info->state != MYRB_DEVICE_ONLINE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	    ldev_info->state != MYRB_DEVICE_WO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 			sdev->id, ldev_info ? ldev_info->state : 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		scmd->result = (DID_BAD_TARGET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	switch (scmd->cmnd[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	case TEST_UNIT_READY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		scmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	case INQUIRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 		if (scmd->cmnd[1] & 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			/* Illegal request, invalid field in CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 						ILLEGAL_REQUEST, 0x24, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 				SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 			myrb_inquiry(cb, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 			scmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	case SYNCHRONIZE_CACHE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		scmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	case MODE_SENSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		    (scmd->cmnd[2] & 0x3F) != 0x08) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 			/* Illegal request, invalid field in CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 						ILLEGAL_REQUEST, 0x24, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 			scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 				SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 			myrb_mode_sense(cb, scmd, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 			scmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	case READ_CAPACITY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		if ((scmd->cmnd[1] & 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 		    (scmd->cmnd[8] & 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 			/* Illegal request, invalid field in CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 						ILLEGAL_REQUEST, 0x24, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 			scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 				SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 			scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		lba = get_unaligned_be32(&scmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 		if (lba) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 			/* Illegal request, invalid field in CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 						ILLEGAL_REQUEST, 0x24, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 			scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 				SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		myrb_read_capacity(cb, scmd, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	case REQUEST_SENSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		myrb_request_sense(cb, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		scmd->result = (DID_OK << 16);
		/* complete the emulated command, as the other non-I/O opcodes do */
		scmd->scsi_done(scmd);
		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	case SEND_DIAGNOSTIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		if (scmd->cmnd[1] != 0x04) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 			/* Illegal request, invalid field in CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 						ILLEGAL_REQUEST, 0x24, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 			scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 				SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			/* Assume good status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 			scmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	case READ_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		if (ldev_info->state == MYRB_DEVICE_WO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 			/* Data protect, attempt to read invalid data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 						DATA_PROTECT, 0x21, 0x06);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 				SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 			scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	case WRITE_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 		       (scmd->cmnd[2] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 		       scmd->cmnd[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		block_cnt = scmd->cmnd[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	case READ_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 		if (ldev_info->state == MYRB_DEVICE_WO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 			/* Data protect, attempt to read invalid data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 						DATA_PROTECT, 0x21, 0x06);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 			scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 				SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 			scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	case WRITE_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	case VERIFY:		/* 0x2F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	case WRITE_VERIFY:	/* 0x2E */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 		lba = get_unaligned_be32(&scmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	case READ_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 		if (ldev_info->state == MYRB_DEVICE_WO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 			/* Data protect, attempt to read invalid data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 						DATA_PROTECT, 0x21, 0x06);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 			scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 				SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 			scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	case WRITE_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	case VERIFY_12: /* 0xAF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	case WRITE_VERIFY_12:	/* 0xAE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		lba = get_unaligned_be32(&scmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		/* Illegal request, invalid opcode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 					ILLEGAL_REQUEST, 0x20, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	myrb_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	mbox->type5.id = scmd->request->tag + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	if (scmd->sc_data_direction == DMA_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		goto submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	nsge = scsi_dma_map(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	if (nsge == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 		sgl = scsi_sglist(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 			mbox->type5.opcode = MYRB_CMD_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			mbox->type5.opcode = MYRB_CMD_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		mbox->type5.ld.xfer_len = block_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		mbox->type5.ld.ldev_num = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		mbox->type5.lba = lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		mbox->type5.addr = (u32)sg_dma_address(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		struct myrb_sge *hw_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 		dma_addr_t hw_sgl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 		if (!hw_sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 		cmd_blk->sgl = hw_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 		cmd_blk->sgl_addr = hw_sgl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 			mbox->type5.opcode = MYRB_CMD_READ_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 			mbox->type5.opcode = MYRB_CMD_WRITE_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 		mbox->type5.ld.xfer_len = block_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		mbox->type5.ld.ldev_num = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 		mbox->type5.lba = lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		mbox->type5.addr = hw_sgl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		mbox->type5.sg_count = nsge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		scsi_for_each_sg(scmd, sgl, nsge, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			hw_sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) submit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	spin_lock_irqsave(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	cb->qcmd(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	spin_unlock_irqrestore(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
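/*
 * Top-level queuecommand dispatcher: commands addressed beyond the last
 * (logical drive) channel are failed with DID_BAD_TARGET, commands on the
 * logical drive channel are handled by myrb_ldev_queuecommand() above, and
 * everything else goes through the SCSI pass-through path.
 */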
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) static int myrb_queuecommand(struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	struct scsi_device *sdev = scmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	if (sdev->channel > myrb_logical_channel(shost)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		scmd->result = (DID_BAD_TARGET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	if (sdev->channel == myrb_logical_channel(shost))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		return myrb_ldev_queuecommand(shost, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	return myrb_pthru_queuecommand(shost, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
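/*
 * slave_alloc for a logical drive: copy the controller's cached view of the
 * drive (cb->ldev_info_buf) into sdev->hostdata and report the corresponding
 * RAID personality to the raid_class template.
 */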
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	struct myrb_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	unsigned short ldev_num = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	enum raid_level level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	ldev_info = cb->ldev_info_buf + ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	if (!sdev->hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	dev_dbg(&sdev->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		"slave alloc ldev %d state %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		ldev_num, ldev_info->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	memcpy(sdev->hostdata, ldev_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	       sizeof(*ldev_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	switch (ldev_info->raid_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	case MYRB_RAID_LEVEL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		level = RAID_LEVEL_LINEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	case MYRB_RAID_LEVEL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		level = RAID_LEVEL_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	case MYRB_RAID_LEVEL3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		level = RAID_LEVEL_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	case MYRB_RAID_LEVEL5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		level = RAID_LEVEL_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	case MYRB_RAID_LEVEL6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		level = RAID_LEVEL_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	case MYRB_RAID_JBOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		level = RAID_LEVEL_JBOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		level = RAID_LEVEL_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
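/*
 * slave_alloc for a physical device: query the controller for the current
 * device state (MYRB_CMD_GET_DEVICE_STATE) and only expose devices that are
 * actually present; the returned state block becomes sdev->hostdata.
 */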
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 	struct myrb_pdev_state *pdev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	if (sdev->id > MYRB_MAX_TARGETS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	if (!pdev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				  sdev, pdev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	if (status != MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		dev_dbg(&sdev->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 			"Failed to get device state, status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		kfree(pdev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	if (!pdev_info->present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		dev_dbg(&sdev->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			"device not present, skip\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 		kfree(pdev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	dev_dbg(&sdev->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 		"slave alloc pdev %d:%d state %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 		sdev->channel, sdev->id, pdev_info->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	sdev->hostdata = pdev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) static int myrb_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	if (sdev->channel > myrb_logical_channel(sdev->host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	if (sdev->lun > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	if (sdev->channel == myrb_logical_channel(sdev->host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		return myrb_ldev_slave_alloc(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	return myrb_pdev_slave_alloc(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
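/*
 * slave_configure: physical devices are marked no_uld_attach so the
 * upper-level drivers (sd, sr, ...) do not claim disks hidden behind the
 * RAID controller; logical drives log a warning when they are not Online.
 */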
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static int myrb_slave_configure(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	struct myrb_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	if (sdev->channel > myrb_logical_channel(sdev->host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 		sdev->no_uld_attach = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	if (sdev->lun != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 	ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 			    "Logical drive is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 			    myrb_devstate_name(ldev_info->state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 	sdev->tagged_supported = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) static void myrb_slave_destroy(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	kfree(sdev->hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 
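/*
 * Legacy BIOS geometry: report the controller's configured heads/sectors
 * and derive the cylinder count from the capacity.
 */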
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		sector_t capacity, int geom[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	geom[0] = cb->ldev_geom_heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 	geom[1] = cb->ldev_geom_sectors;
	/* sector_div() divides in place and returns the remainder */
	sector_div(capacity, geom[0] * geom[1]);
	geom[2] = capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
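/*
 * Per-device sysfs attribute "raid_state": logical drives report the state
 * cached at slave_alloc time, physical devices are re-queried from the
 * controller on every read.  Writing "kill"/"offline", "online" or "standby"
 * changes the state of a physical device via myrb_set_pdev_state().
 */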
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) static ssize_t raid_state_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	if (!sdev->hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 		return snprintf(buf, 16, "Unknown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		name = myrb_devstate_name(ldev_info->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 		if (name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 			ret = snprintf(buf, 32, "%s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 				       ldev_info->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 					  sdev, pdev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 		if (status != MYRB_STATUS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 			sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 				    "Failed to get device state, status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 				    status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		if (!pdev_info->present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 			name = "Removed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 			name = myrb_devstate_name(pdev_info->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		if (name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 			ret = snprintf(buf, 32, "%s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 				       pdev_info->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) static ssize_t raid_state_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	struct myrb_pdev_state *pdev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	enum myrb_devstate new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	if (!strncmp(buf, "kill", 4) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	    !strncmp(buf, "offline", 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		new_state = MYRB_DEVICE_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	else if (!strncmp(buf, "online", 6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		new_state = MYRB_DEVICE_ONLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	else if (!strncmp(buf, "standby", 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		new_state = MYRB_DEVICE_STANDBY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 	pdev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	if (!pdev_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 			    "Failed - no physical device information\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	if (!pdev_info->present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 			    "Failed - device not present\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	if (pdev_info->state == new_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 	status = myrb_set_pdev_state(cb, sdev, new_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	case MYRB_STATUS_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	case MYRB_STATUS_START_DEVICE_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 			     "Failed - Unable to Start Device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		count = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	case MYRB_STATUS_NO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 			    "Failed - No Device at Address\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		count = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 			 "Failed - Invalid Channel or Target or Modifier\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		count = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	case MYRB_STATUS_CHANNEL_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 			 "Failed - Channel Busy\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 		count = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 			 "Failed - Unexpected Status %04X\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 		count = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) static DEVICE_ATTR_RW(raid_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) static ssize_t raid_level_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 		const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 			return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 		name = myrb_raidlevel_name(ldev_info->raid_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 		if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 			return snprintf(buf, 32, "Invalid (%02X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 					ldev_info->state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 		return snprintf(buf, 32, "%s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	return snprintf(buf, 32, "Physical Drive\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) static DEVICE_ATTR_RO(raid_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static ssize_t rebuild_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	struct myrb_rbld_progress rbld_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 	unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (sdev->channel < myrb_logical_channel(sdev->host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		return snprintf(buf, 32, "physical device - not rebuilding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	status = myrb_get_rbld_progress(cb, &rbld_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 	if (rbld_buf.ldev_num != sdev->id ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	    status != MYRB_STATUS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		return snprintf(buf, 32, "not rebuilding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	return snprintf(buf, 32, "rebuilding block %u of %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			rbld_buf.ldev_size - rbld_buf.blocks_left,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 			rbld_buf.ldev_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
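/*
 * "rebuild" attribute: writing a non-zero value to a physical device starts
 * an asynchronous rebuild of that disk (MYRB_CMD_REBUILD_ASYNC); writing 0
 * cancels a running rebuild by issuing MYRB_CMD_REBUILD_CONTROL with a
 * rebuild rate of 0xFF.  Both paths share the single direct-command slot,
 * serialized by dcmd_mutex.
 */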
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) static ssize_t rebuild_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 		struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	struct myrb_cmdblk *cmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	union myrb_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	int rc, start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	const char *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	rc = kstrtoint(buf, 0, &start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	if (sdev->channel >= myrb_logical_channel(sdev->host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	status = myrb_get_rbld_progress(cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	if (start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		if (status == MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 			sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 				    "Rebuild Not Initiated; already in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		mutex_lock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		myrb_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		mbox->type3D.id = MYRB_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		mbox->type3D.channel = sdev->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		mbox->type3D.target = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 		status = myrb_exec_cmd(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		mutex_unlock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		struct pci_dev *pdev = cb->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		unsigned char *rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		dma_addr_t rate_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		if (status != MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 			sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 				    "Rebuild Not Cancelled; not in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 					  &rate_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		if (rate == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 			sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 				    "Cancellation of Rebuild Failed - Out of Memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		mutex_lock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 		cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		myrb_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		mbox->type3R.id = MYRB_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		mbox->type3R.rbld_rate = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		mbox->type3R.addr = rate_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		status = myrb_exec_cmd(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 		mutex_unlock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	if (status == MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 			    start ? "Initiated" : "Cancelled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	if (!start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 			    "Rebuild Not Cancelled, status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 			    status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 		msg = "Attempt to Rebuild Online or Unresponsive Drive";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		msg = "New Disk Failed During Rebuild";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	case MYRB_STATUS_INVALID_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		msg = "Invalid Device Address";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		msg = "Already in Progress";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	if (msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 			    "Rebuild Failed - %s\n", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 			    "Rebuild Failed, status 0x%x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) static DEVICE_ATTR_RW(rebuild);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
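/*
 * "consistency_check" attribute: writing a non-zero value to a logical
 * drive starts an asynchronous consistency check with automatic restoration
 * (MYRB_CMD_CHECK_CONSISTENCY_ASYNC); writing 0 cancels it through the same
 * rebuild-rate control command used above.  Progress is reported by the
 * shared rebuild_show()/consistency_check_show() helper.
 */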
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static ssize_t consistency_check_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	struct myrb_rbld_progress rbld_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	struct myrb_cmdblk *cmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	union myrb_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	unsigned short ldev_num = 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	int rc, start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 	const char *msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 	rc = kstrtoint(buf, 0, &start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	if (sdev->channel < myrb_logical_channel(sdev->host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 
	status = myrb_get_rbld_progress(cb, &rbld_buf);
	/* remember which logical drive, if any, has a check in progress */
	if (status == MYRB_STATUS_SUCCESS)
		ldev_num = rbld_buf.ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	if (start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 		if (status == MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 			sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 				    "Check Consistency Not Initiated; already in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 			return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		mutex_lock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		myrb_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 		mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		mbox->type3C.id = MYRB_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 		mbox->type3C.ldev_num = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 		mbox->type3C.auto_restore = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		status = myrb_exec_cmd(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		mutex_unlock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		struct pci_dev *pdev = cb->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 		unsigned char *rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		dma_addr_t rate_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		if (ldev_num != sdev->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 			sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 				    "Check Consistency Not Cancelled; not in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 					  &rate_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 		if (rate == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 			sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 		mutex_lock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 		cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 		myrb_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 		mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 		mbox->type3R.id = MYRB_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 		mbox->type3R.rbld_rate = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		mbox->type3R.addr = rate_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		status = myrb_exec_cmd(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		mutex_unlock(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	if (status == MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 			    start ? "Initiated" : "Cancelled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	if (!start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 			    "Check Consistency Not Cancelled, status 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 			    status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		msg = "Dependent Physical Device is DEAD";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		msg = "New Disk Failed During Rebuild";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	case MYRB_STATUS_INVALID_ADDRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		msg = "Invalid or Nonredundant Logical Drive";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		msg = "Already in Progress";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	if (msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 			    "Check Consistency Failed - %s\n", msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 		sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 			    "Check Consistency Failed, status 0x%x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) static ssize_t consistency_check_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	return rebuild_show(dev, attr, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) static DEVICE_ATTR_RW(consistency_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) static ssize_t ctlr_num_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 		struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	struct myrb_hba *cb = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) static DEVICE_ATTR_RO(ctlr_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) static ssize_t firmware_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 		struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	struct myrb_hba *cb = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 	return snprintf(buf, 16, "%s\n", cb->fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) static DEVICE_ATTR_RO(firmware);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) static ssize_t model_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 		struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	struct myrb_hba *cb = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 	return snprintf(buf, 16, "%s\n", cb->model_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) static DEVICE_ATTR_RO(model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) static ssize_t flush_cache_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	struct myrb_hba *cb = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	if (status == MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 			     "Cache Flush Completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 	shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 		     "Cache Flush Failed, status %x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) static DEVICE_ATTR_WO(flush_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
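/*
 * The device attributes above are attached to every SCSI device exported by
 * this host, the host attributes to the Scsi_Host itself.  With a
 * hypothetical controller at host0 and a logical drive at 0:2:0:0 they
 * would typically be used like:
 *
 *   cat /sys/bus/scsi/devices/0:2:0:0/raid_state
 *   echo 1 > /sys/bus/scsi/devices/0:2:0:0/consistency_check
 *   echo 1 > /sys/class/scsi_host/host0/flush_cache
 */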
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) static struct device_attribute *myrb_sdev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	&dev_attr_rebuild,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	&dev_attr_consistency_check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 	&dev_attr_raid_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 	&dev_attr_raid_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) static struct device_attribute *myrb_shost_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 	&dev_attr_ctlr_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 	&dev_attr_model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 	&dev_attr_firmware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 	&dev_attr_flush_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 
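/*
 * SCSI host template: one myrb_cmdblk of per-command private data is
 * allocated via cmd_size, the slave_* callbacks above manage per-device
 * state, and the attribute arrays are exported through sdev_attrs and
 * shost_attrs.
 */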
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) static struct scsi_host_template myrb_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 	.module			= THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 	.name			= "DAC960",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	.proc_name		= "myrb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 	.queuecommand		= myrb_queuecommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	.eh_host_reset_handler	= myrb_host_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 	.slave_alloc		= myrb_slave_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	.slave_configure	= myrb_slave_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 	.slave_destroy		= myrb_slave_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 	.bios_param		= myrb_biosparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 	.cmd_size		= sizeof(struct myrb_cmdblk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 	.shost_attrs		= myrb_shost_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 	.sdev_attrs		= myrb_sdev_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 	.this_id		= -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)  * myrb_is_raid - return boolean indicating device is raid volume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247)  * @dev: the device struct object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) static int myrb_is_raid(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 	return sdev->channel == myrb_logical_channel(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257)  * myrb_get_resync - get raid volume resync percent complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)  * @dev: the device struct object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) static void myrb_get_resync(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 	struct myrb_rbld_progress rbld_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 	unsigned int percent_complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 	unsigned int ldev_size = 0, remaining = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 	if (sdev->channel < myrb_logical_channel(sdev->host))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 	status = myrb_get_rbld_progress(cb, &rbld_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 	if (status == MYRB_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 		if (rbld_buf.ldev_num == sdev->id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 			ldev_size = rbld_buf.ldev_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 			remaining = rbld_buf.blocks_left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	if (remaining && ldev_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 	raid_set_resync(myrb_raid_template, dev, percent_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)  * myrb_get_state - get raid volume status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)  * @dev: the device struct object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) static void myrb_get_state(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	struct myrb_hba *cb = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	enum raid_state state = RAID_STATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		state = RAID_STATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 		status = myrb_get_rbld_progress(cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		if (status == MYRB_STATUS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 			state = RAID_STATE_RESYNCING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 			switch (ldev_info->state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 			case MYRB_DEVICE_ONLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 				state = RAID_STATE_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 			case MYRB_DEVICE_WO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 			case MYRB_DEVICE_CRITICAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 				state = RAID_STATE_DEGRADED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 				state = RAID_STATE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	raid_set_state(myrb_raid_template, dev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
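/*
 * raid_class hooks: .cookie ties hosts created from myrb_template to
 * the transport attributes, while the callbacks above report whether a
 * device is a logical drive and feed its resync percentage and RAID
 * state back through raid_set_resync()/raid_set_state().
 */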
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) static struct raid_function_template myrb_raid_functions = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	.cookie		= &myrb_template,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	.is_raid	= myrb_is_raid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	.get_resync	= myrb_get_resync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	.get_state	= myrb_get_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 
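/*
 * Completion path for SCSI commands: unmap the data buffer, copy any
 * DCDB sense data back into the command, release the per-command DCDB
 * and scatter/gather pool entries, translate the controller status
 * into a SCSI result (building sense data for medium errors), and
 * finish the command via scmd->scsi_done().
 */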
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 		struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 	if (!cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 	scsi_dma_unmap(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	if (cmd_blk->dcdb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 			      cmd_blk->dcdb_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		cmd_blk->dcdb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	if (cmd_blk->sgl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 		cmd_blk->sgl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 		cmd_blk->sgl_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	case MYRB_STATUS_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	case MYRB_STATUS_DEVICE_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 		scmd->result = (DID_OK << 16) | status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	case MYRB_STATUS_BAD_DATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 		dev_dbg(&scmd->device->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 			"Bad Data Encountered\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 			/* Unrecovered read error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 						MEDIUM_ERROR, 0x11, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 			/* Write error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 						MEDIUM_ERROR, 0x0C, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 			/* Unrecovered read error, auto-reallocation failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 						MEDIUM_ERROR, 0x11, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 			/* Write error, auto-reallocation failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 			scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 						MEDIUM_ERROR, 0x0C, 0x02);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 		dev_dbg(&scmd->device->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 			    "Logical Drive Nonexistent or Offline\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 		scmd->result = (DID_BAD_TARGET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 		dev_dbg(&scmd->device->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 			    "Attempt to Access Beyond End of Logical Drive\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 		/* Logical block address out of range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 		scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 					NOT_READY, 0x21, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 		scmd->result = (DID_BAD_TARGET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 		scmd_printk(KERN_ERR, scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 			    "Unexpected Error Status %04X\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 		scmd->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 
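/*
 * Completion path for internally generated (non-SCSI) commands: wake
 * up whoever is waiting on the command block's completion.
 */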
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	if (!cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	if (cmd_blk->completion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 		complete(cmd_blk->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 		cmd_blk->completion = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 
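/*
 * Periodic monitor work: each pass handles at most one outstanding
 * housekeeping task (fetch new event log entries, the error table,
 * rebuild or consistency-check progress, logical drive info or
 * background-initialisation status) and reschedules itself after a
 * short delay; with nothing pending it issues a fresh enquiry and
 * falls back to MYRB_PRIMARY_MONITOR_INTERVAL, or reschedules
 * immediately if the enquiry flagged new work.
 */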
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) static void myrb_monitor(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	struct myrb_hba *cb = container_of(work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 			struct myrb_hba, monitor_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	struct Scsi_Host *shost = cb->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	dev_dbg(&shost->shost_gendev, "monitor tick\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 	if (cb->new_ev_seq > cb->old_ev_seq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 		int event = cb->old_ev_seq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		dev_dbg(&shost->shost_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 			"get event log no %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 			cb->new_ev_seq, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 		myrb_get_event(cb, event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 		cb->old_ev_seq = event + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 		interval = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	} else if (cb->need_err_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 		cb->need_err_info = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 		dev_dbg(&shost->shost_gendev, "get error table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 		myrb_get_errtable(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		interval = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	} else if (cb->need_rbld && cb->rbld_first) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		cb->need_rbld = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		dev_dbg(&shost->shost_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 			"get rebuild progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		myrb_update_rbld_progress(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		interval = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	} else if (cb->need_ldev_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		cb->need_ldev_info = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		dev_dbg(&shost->shost_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 			"get logical drive info\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 		myrb_get_ldev_info(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 		interval = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	} else if (cb->need_rbld) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		cb->need_rbld = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 		dev_dbg(&shost->shost_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 			"get rebuild progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 		myrb_update_rbld_progress(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		interval = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 	} else if (cb->need_cc_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		cb->need_cc_status = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		dev_dbg(&shost->shost_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 			"get consistency check progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 		myrb_get_cc_progress(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 		interval = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 	} else if (cb->need_bgi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 		cb->need_bgi_status = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 		dev_dbg(&shost->shost_gendev, "get background init status\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		myrb_bgi_control(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 		interval = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		dev_dbg(&shost->shost_gendev, "new enquiry\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		mutex_lock(&cb->dma_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 		myrb_hba_enquiry(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 		mutex_unlock(&cb->dma_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		    cb->need_err_info || cb->need_rbld ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 		    cb->need_ldev_info || cb->need_cc_status ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 		    cb->need_bgi_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 			dev_dbg(&shost->shost_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 				"reschedule monitor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 			interval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 	if (interval > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 		cb->primary_monitor_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)  * myrb_err_status - reports controller BIOS messages
 * @cb: HBA that reported the message
 * @error: BIOS message code read from the error status register
 * @parm0: first parameter byte accompanying the message
 * @parm1: second parameter byte accompanying the message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)  * Controller BIOS messages are passed through the Error Status Register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)  * when the driver performs the BIOS handshaking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490)  * Return: true for fatal errors and false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		unsigned char parm0, unsigned char parm1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	struct pci_dev *pdev = cb->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	switch (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 	case 0x00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 		dev_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 			 "Physical Device %d:%d Not Responding\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 			 parm1, parm0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 	case 0x08:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		dev_notice(&pdev->dev, "Spinning Up Drives\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 	case 0x30:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 	case 0x60:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 	case 0x70:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	case 0x90:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 			   parm1, parm0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	case 0xA0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	case 0xB0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	case 0xD0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 	case 0xF0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 			error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540)  * Hardware-specific functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)  * DAC960 LA Series Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 
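/*
 * The helpers below poke the LA-series inbound doorbell (IDB),
 * outbound doorbell (ODB) and interrupt mask registers, all byte-wide
 * registers at fixed offsets from the mapped register window.
 */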
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) static inline void DAC960_LA_gen_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) static inline void DAC960_LA_reset_ctrl(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) static inline bool DAC960_LA_init_in_progress(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	return !(idb & DAC960_LA_IDB_INIT_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) static inline void DAC960_LA_ack_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 	       base + DAC960_LA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) static inline void DAC960_LA_enable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	unsigned char odb = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	odb &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) static inline void DAC960_LA_disable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	unsigned char odb = 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	odb |= DAC960_LA_IRQMASK_DISABLE_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	writeb(odb, base + DAC960_LA_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) static inline bool DAC960_LA_intr_enabled(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 
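/*
 * Copy a command into the memory mailbox: words 1-3 are stored first
 * and the wmb() keeps them ordered ahead of word 0, so the controller
 * never observes a partially written mailbox entry; the trailing mb()
 * forces the update out before the doorbell is rung.
 */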
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 		union myrb_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 	mem_mbox->words[1] = mbox->words[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	mem_mbox->words[2] = mbox->words[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 	mem_mbox->words[3] = mbox->words[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	/* Memory barrier to prevent reordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 	mem_mbox->words[0] = mbox->words[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	/* Memory barrier to force PCI access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 		union myrb_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	return readb(base + DAC960_LA_STSID_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) static inline unsigned short DAC960_LA_read_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	return readw(base + DAC960_LA_STS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 		unsigned char *param0, unsigned char *param1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	errsts &= ~DAC960_LA_ERRSTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	*error = errsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 
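/*
 * Hardware mailbox handshake, used to bring up the memory mailbox
 * interface: wait for the mailbox to drain, write the command, ring
 * the new-command doorbell, then poll for a status and acknowledge it.
 * Each wait is bounded by MYRB_MAILBOX_TIMEOUT iterations of
 * udelay(10) and fails with MYRB_STATUS_SUBSYS_TIMEOUT.
 */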
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) static inline unsigned short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		union myrb_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 	int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 		if (!DAC960_LA_hw_mbox_is_full(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 	if (DAC960_LA_hw_mbox_is_full(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 			"Timeout waiting for empty mailbox\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 		return MYRB_STATUS_SUBSYS_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	DAC960_LA_write_hw_mbox(base, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 	DAC960_LA_hw_mbox_new_cmd(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 		if (DAC960_LA_hw_mbox_status_available(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 		timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	if (!DAC960_LA_hw_mbox_status_available(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		return MYRB_STATUS_SUBSYS_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 	status = DAC960_LA_read_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 	DAC960_LA_ack_hw_mbox_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	DAC960_LA_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 
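/*
 * LA-series bring-up: mask interrupts, acknowledge any stale mailbox
 * status, wait for the controller's own initialisation to finish while
 * decoding BIOS messages from the error status register, then enable
 * the memory mailbox interface and hook the LA-specific mailbox,
 * interrupt and reset callbacks into the HBA.
 */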
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) static int DAC960_LA_hw_init(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 		struct myrb_hba *cb, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	unsigned char error, parm0, parm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	DAC960_LA_disable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 	DAC960_LA_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 	udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	while (DAC960_LA_init_in_progress(base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) 	       timeout < MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 		if (DAC960_LA_read_error_status(base, &error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 					      &parm0, &parm1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 		    myrb_err_status(cb, error, parm0, parm1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 			"Timeout waiting for Controller Initialisation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 			"Unable to Enable Memory Mailbox Interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 		DAC960_LA_reset_ctrl(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	DAC960_LA_enable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 	cb->qcmd = myrb_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 	if (cb->dual_mode_interface)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	cb->disable_intr = DAC960_LA_disable_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) 	cb->reset = DAC960_LA_reset_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
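/*
 * Interrupt handler: acknowledge the outbound doorbell, then walk the
 * status mailbox ring while entries are marked valid.  Tags below 3
 * are the driver's internal command blocks (MYRB_DCMD_TAG and
 * MYRB_MCMD_TAG); anything else is mapped back to a SCSI command via
 * scsi_host_find_tag(id - 3).  Each consumed entry is cleared and the
 * ring pointer wraps at last_stat_mbox.
 */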
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	struct myrb_hba *cb = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	void __iomem *base = cb->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 	struct myrb_stat_mbox *next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	spin_lock_irqsave(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 	DAC960_LA_ack_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	next_stat_mbox = cb->next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	while (next_stat_mbox->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 		unsigned char id = next_stat_mbox->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 		struct scsi_cmnd *scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		struct myrb_cmdblk *cmd_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		if (id == MYRB_DCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 			cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 		else if (id == MYRB_MCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 			cmd_blk = &cb->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 			scmd = scsi_host_find_tag(cb->host, id - 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 			if (scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 				cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 		if (cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 			cmd_blk->status = next_stat_mbox->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 			dev_err(&cb->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 				"Unhandled command completion %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 		if (++next_stat_mbox > cb->last_stat_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 			next_stat_mbox = cb->first_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 		if (cmd_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 			if (id < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 				myrb_handle_cmdblk(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 				myrb_handle_scsi(cb, cmd_blk, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 	cb->next_stat_mbox = next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 	spin_unlock_irqrestore(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) struct myrb_privdata DAC960_LA_privdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 	.hw_init =	DAC960_LA_hw_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	.irq_handler =	DAC960_LA_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	.mmio_size =	DAC960_LA_mmio_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)  * DAC960 PG Series Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822)  */
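/*
 * The PG-series helpers mirror the LA ones above, but the doorbell and
 * interrupt mask registers here are 32 bits wide, hence the use of
 * readl()/writel().
 */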
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) static inline void DAC960_PG_gen_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) static inline void DAC960_PG_reset_ctrl(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	return idb & DAC960_PG_IDB_HWMBOX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) static inline bool DAC960_PG_init_in_progress(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) static inline void DAC960_PG_ack_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	       base + DAC960_PG_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) static inline void DAC960_PG_enable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	unsigned int imask = (unsigned int)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) static inline void DAC960_PG_disable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	unsigned int imask = (unsigned int)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) static inline bool DAC960_PG_intr_enabled(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) 		union myrb_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	mem_mbox->words[1] = mbox->words[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) 	mem_mbox->words[2] = mbox->words[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	mem_mbox->words[3] = mbox->words[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) 	/* Memory barrier to prevent reordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 	mem_mbox->words[0] = mbox->words[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	/* Memory barrier to force PCI access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) 		union myrb_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) 	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) 	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) static inline unsigned char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) DAC960_PG_read_status_cmd_ident(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 	return readb(base + DAC960_PG_STSID_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) static inline unsigned short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) DAC960_PG_read_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	return readw(base + DAC960_PG_STS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 		unsigned char *param0, unsigned char *param1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 	errsts &= ~DAC960_PG_ERRSTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) 	*error = errsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 
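/*
 * Same hardware mailbox handshake as DAC960_LA_mbox_init(), using the
 * PG register offsets and doorbell bits.
 */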
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) static inline unsigned short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 		union myrb_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	unsigned short status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) 	int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) 		if (!DAC960_PG_hw_mbox_is_full(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 		timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	if (DAC960_PG_hw_mbox_is_full(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 			"Timeout waiting for empty mailbox\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 		return MYRB_STATUS_SUBSYS_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 	DAC960_PG_write_hw_mbox(base, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) 	DAC960_PG_hw_mbox_new_cmd(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 		if (DAC960_PG_hw_mbox_status_available(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) 		timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	if (!DAC960_PG_hw_mbox_status_available(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 			"Timeout waiting for mailbox status\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 		return MYRB_STATUS_SUBSYS_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 	status = DAC960_PG_read_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) 	DAC960_PG_ack_hw_mbox_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 	DAC960_PG_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) static int DAC960_PG_hw_init(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) 		struct myrb_hba *cb, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) 	int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) 	unsigned char error, parm0, parm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) 	DAC960_PG_disable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) 	DAC960_PG_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) 	udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) 	while (DAC960_PG_init_in_progress(base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) 	       timeout < MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) 		if (DAC960_PG_read_error_status(base, &error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) 						&parm0, &parm1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) 		    myrb_err_status(cb, error, parm0, parm1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) 		timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) 			"Timeout waiting for Controller Initialisation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) 	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) 			"Unable to Enable Memory Mailbox Interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) 		DAC960_PG_reset_ctrl(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) 	DAC960_PG_enable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) 	cb->qcmd = myrb_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) 	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) 	if (cb->dual_mode_interface)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) 		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) 		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) 	cb->disable_intr = DAC960_PG_disable_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) 	cb->reset = DAC960_PG_reset_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) 
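/*
 * PG interrupt handler: structurally the same status mailbox scan as
 * DAC960_LA_intr_handler(); here the completion helpers are called
 * unconditionally and rely on their own NULL cmd_blk checks.
 */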
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) 	struct myrb_hba *cb = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 	void __iomem *base = cb->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) 	struct myrb_stat_mbox *next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) 	spin_lock_irqsave(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) 	DAC960_PG_ack_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) 	next_stat_mbox = cb->next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) 	while (next_stat_mbox->valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) 		unsigned char id = next_stat_mbox->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) 		struct scsi_cmnd *scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) 		struct myrb_cmdblk *cmd_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) 
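		/*
		 * Completion ids below 3 identify the driver's internal
		 * command blocks (MYRB_DCMD_TAG for directly issued commands,
		 * MYRB_MCMD_TAG for the background monitor); any other id
		 * corresponds to a SCSI command and is the block-layer tag
		 * offset by 3, hence the id - 3 lookup below.  The PD and P
		 * series interrupt handlers further down use the same scheme.
		 */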
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) 		if (id == MYRB_DCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) 			cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) 		else if (id == MYRB_MCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) 			cmd_blk = &cb->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) 			scmd = scsi_host_find_tag(cb->host, id - 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) 			if (scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) 				cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) 		if (cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) 			cmd_blk->status = next_stat_mbox->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) 			dev_err(&cb->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) 				"Unhandled command completion %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) 		if (++next_stat_mbox > cb->last_stat_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) 			next_stat_mbox = cb->first_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) 		if (id < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) 			myrb_handle_cmdblk(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) 			myrb_handle_scsi(cb, cmd_blk, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) 	cb->next_stat_mbox = next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) 	spin_unlock_irqrestore(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) struct myrb_privdata DAC960_PG_privdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) 	.hw_init =	DAC960_PG_hw_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) 	.irq_handler =	DAC960_PG_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) 	.mmio_size =	DAC960_PG_mmio_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098)  * DAC960 PD Series Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) 	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) 	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) static inline void DAC960_PD_gen_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) 	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) static inline void DAC960_PD_reset_ctrl(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) 	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) 	return idb & DAC960_PD_IDB_HWMBOX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) static inline bool DAC960_PD_init_in_progress(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) 	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) static inline void DAC960_PD_ack_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) 	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) 	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) 	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) static inline void DAC960_PD_enable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) 	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) static inline void DAC960_PD_disable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) 	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) static inline bool DAC960_PD_intr_enabled(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) 	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) 	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) 		union myrb_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) 	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) 	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) 	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) 	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) static inline unsigned char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) DAC960_PD_read_status_cmd_ident(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) 	return readb(base + DAC960_PD_STSID_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) static inline unsigned short
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) DAC960_PD_read_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) 	return readw(base + DAC960_PD_STS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) 		unsigned char *param0, unsigned char *param1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) 	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) 	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) 	errsts &= ~DAC960_PD_ERRSTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) 	*error = errsts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) 	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) 	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) 	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) 	void __iomem *base = cb->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) 	while (DAC960_PD_hw_mbox_is_full(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) 	DAC960_PD_write_cmd_mbox(base, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) 	DAC960_PD_hw_mbox_new_cmd(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) static int DAC960_PD_hw_init(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) 		struct myrb_hba *cb, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) 	int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) 	unsigned char error, parm0, parm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) 			(unsigned long)cb->io_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) 	DAC960_PD_disable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) 	DAC960_PD_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) 	udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) 	while (DAC960_PD_init_in_progress(base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) 	       timeout < MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) 		if (DAC960_PD_read_error_status(base, &error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) 					      &parm0, &parm1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) 		    myrb_err_status(cb, error, parm0, parm1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) 		timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) 			"Timeout waiting for Controller Initialisation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) 	if (!myrb_enable_mmio(cb, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) 			"Unable to Enable Memory Mailbox Interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) 		DAC960_PD_reset_ctrl(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) 	DAC960_PD_enable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) 	cb->qcmd = DAC960_PD_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) 	cb->disable_intr = DAC960_PD_disable_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) 	cb->reset = DAC960_PD_reset_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) 	struct myrb_hba *cb = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) 	void __iomem *base = cb->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) 	spin_lock_irqsave(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) 	while (DAC960_PD_hw_mbox_status_available(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) 		struct scsi_cmnd *scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) 		struct myrb_cmdblk *cmd_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) 		if (id == MYRB_DCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) 			cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) 		else if (id == MYRB_MCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) 			cmd_blk = &cb->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) 			scmd = scsi_host_find_tag(cb->host, id - 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) 			if (scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) 				cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) 		if (cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) 			cmd_blk->status = DAC960_PD_read_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) 			dev_err(&cb->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) 				"Unhandled command completion %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) 		DAC960_PD_ack_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) 		DAC960_PD_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) 		if (id < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) 			myrb_handle_cmdblk(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) 			myrb_handle_scsi(cb, cmd_blk, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) 	spin_unlock_irqrestore(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) struct myrb_privdata DAC960_PD_privdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) 	.hw_init =	DAC960_PD_hw_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) 	.irq_handler =	DAC960_PD_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) 	.mmio_size =	DAC960_PD_mmio_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301)  * DAC960 P Series Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303)  * Similar to the DAC960 PD Series Controllers, but some commands have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)  * to be translated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)  */
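/*
 * For reference, a minimal summary of the translation performed below
 * (illustrative only, mirroring the switch statements in DAC960_P_qcmd()
 * and DAC960_P_intr_handler()):
 *
 *	MYRB_CMD_ENQUIRY	  <-> MYRB_CMD_ENQUIRY_OLD
 *	MYRB_CMD_GET_DEVICE_STATE  -> MYRB_CMD_GET_DEVICE_STATE_OLD
 *	MYRB_CMD_READ		  <-> MYRB_CMD_READ_OLD
 *	MYRB_CMD_WRITE		  <-> MYRB_CMD_WRITE_OLD
 *	MYRB_CMD_READ_SG	  <-> MYRB_CMD_READ_SG_OLD
 *	MYRB_CMD_WRITE_SG	  <-> MYRB_CMD_WRITE_SG_OLD
 *
 * Read/write style commands additionally have their mailbox bytes repacked
 * by myrb_translate_to_rw_command() on submission and restored by
 * myrb_translate_from_rw_command() on completion.
 */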
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) static inline void myrb_translate_enquiry(void *enq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) 	memcpy(enq + 132, enq + 36, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) 	memset(enq + 36, 0, 96);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) static inline void myrb_translate_devstate(void *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) 	memcpy(state + 2, state + 3, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) 	memmove(state + 4, state + 5, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) 	memmove(state + 6, state + 8, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) 	int ldev_num = mbox->type5.ld.ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) 	mbox->bytes[3] &= 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) 	mbox->bytes[3] |= mbox->bytes[7] << 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) 	mbox->bytes[7] = ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) 	int ldev_num = mbox->bytes[7];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) 	mbox->bytes[7] = mbox->bytes[3] >> 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) 	mbox->bytes[3] &= 0x7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) 	mbox->bytes[3] |= ldev_num << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) 	void __iomem *base = cb->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) 	switch (mbox->common.opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) 	case MYRB_CMD_ENQUIRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) 		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) 	case MYRB_CMD_GET_DEVICE_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) 		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) 	case MYRB_CMD_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) 		mbox->common.opcode = MYRB_CMD_READ_OLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) 		myrb_translate_to_rw_command(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) 	case MYRB_CMD_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) 		myrb_translate_to_rw_command(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) 	case MYRB_CMD_READ_SG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) 		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) 		myrb_translate_to_rw_command(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) 	case MYRB_CMD_WRITE_SG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) 		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) 		myrb_translate_to_rw_command(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) 	while (DAC960_PD_hw_mbox_is_full(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) 		udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) 	DAC960_PD_write_cmd_mbox(base, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) 	DAC960_PD_hw_mbox_new_cmd(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) static int DAC960_P_hw_init(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) 		struct myrb_hba *cb, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) 	int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) 	unsigned char error, parm0, parm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) 			(unsigned long)cb->io_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) 	DAC960_PD_disable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) 	DAC960_PD_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391) 	udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) 	while (DAC960_PD_init_in_progress(base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) 	       timeout < MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) 		if (DAC960_PD_read_error_status(base, &error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) 						&parm0, &parm1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) 		    myrb_err_status(cb, error, parm0, parm1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) 		timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) 			"Timeout waiting for Controller Initialisation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) 	if (!myrb_enable_mmio(cb, NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) 			"Unable to allocate DMA mapped memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) 		DAC960_PD_reset_ctrl(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) 		return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) 	DAC960_PD_enable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) 	cb->qcmd = DAC960_P_qcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) 	cb->disable_intr = DAC960_PD_disable_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) 	cb->reset = DAC960_PD_reset_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) 	struct myrb_hba *cb = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) 	void __iomem *base = cb->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) 	spin_lock_irqsave(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) 	while (DAC960_PD_hw_mbox_status_available(base)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) 		struct scsi_cmnd *scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) 		struct myrb_cmdblk *cmd_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) 		union myrb_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432) 		enum myrb_cmd_opcode op;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) 		if (id == MYRB_DCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) 			cmd_blk = &cb->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) 		else if (id == MYRB_MCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) 			cmd_blk = &cb->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) 			scmd = scsi_host_find_tag(cb->host, id - 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) 			if (scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) 				cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) 		if (cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) 			cmd_blk->status = DAC960_PD_read_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) 			dev_err(&cb->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) 				"Unhandled command completion %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) 		DAC960_PD_ack_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) 		DAC960_PD_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) 		if (!cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) 		mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) 		op = mbox->common.opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) 		switch (op) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) 		case MYRB_CMD_ENQUIRY_OLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) 			mbox->common.opcode = MYRB_CMD_ENQUIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) 			myrb_translate_enquiry(cb->enquiry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) 		case MYRB_CMD_READ_OLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) 			mbox->common.opcode = MYRB_CMD_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) 			myrb_translate_from_rw_command(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) 		case MYRB_CMD_WRITE_OLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) 			mbox->common.opcode = MYRB_CMD_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469) 			myrb_translate_from_rw_command(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) 		case MYRB_CMD_READ_SG_OLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) 			mbox->common.opcode = MYRB_CMD_READ_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) 			myrb_translate_from_rw_command(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) 		case MYRB_CMD_WRITE_SG_OLD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) 			mbox->common.opcode = MYRB_CMD_WRITE_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) 			myrb_translate_from_rw_command(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) 		if (id < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) 			myrb_handle_cmdblk(cb, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) 			myrb_handle_scsi(cb, cmd_blk, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) 	spin_unlock_irqrestore(&cb->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) struct myrb_privdata DAC960_P_privdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492) 	.hw_init =	DAC960_P_hw_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) 	.irq_handler =	DAC960_P_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) 	.mmio_size =	DAC960_PD_mmio_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) 		const struct pci_device_id *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) 	struct myrb_privdata *privdata =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) 		(struct myrb_privdata *)entry->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) 	irq_handler_t irq_handler = privdata->irq_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503) 	unsigned int mmio_size = privdata->mmio_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) 	struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) 	struct myrb_hba *cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) 	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) 	if (!shost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) 		dev_err(&pdev->dev, "Unable to allocate Controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) 	shost->max_cmd_len = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) 	shost->max_lun = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) 	cb = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) 	mutex_init(&cb->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) 	mutex_init(&cb->dma_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) 	cb->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) 	if (pci_enable_device(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) 		goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) 
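	/*
	 * The PD and P series controllers expose their registers through an
	 * I/O port window in BAR 0 (reserved later via request_region() in
	 * their hw_init routines) with the memory window in BAR 1; the other
	 * controllers only use the memory BAR.
	 */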
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) 	if (privdata->hw_init == DAC960_PD_hw_init ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) 	    privdata->hw_init == DAC960_P_hw_init) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) 		cb->io_addr = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) 		cb->pci_addr = pci_resource_start(pdev, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) 		cb->pci_addr = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) 	pci_set_drvdata(pdev, cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) 	spin_lock_init(&cb->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) 	if (mmio_size < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) 		mmio_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) 	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) 	if (cb->mmio_base == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) 			"Unable to map Controller Register Window\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) 		goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) 	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) 	if (privdata->hw_init(pdev, cb, cb->io_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) 		goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) 	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) 			"Unable to acquire IRQ Channel %d\n", pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) 		goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) 	cb->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) 	return cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) 	dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) 		"Failed to initialize Controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) 	myrb_cleanup(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) 	struct myrb_hba *cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) 	cb = myrb_detect(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) 	if (!cb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) 	ret = myrb_get_hba_config(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) 		myrb_cleanup(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) 	if (!myrb_create_mempools(dev, cb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) 	ret = scsi_add_host(cb->host, &dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) 		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) 		myrb_destroy_mempools(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) 		goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) 	scsi_scan_host(cb->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588) 	myrb_cleanup(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) static void myrb_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) 	struct myrb_hba *cb = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) 	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) 	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) 	myrb_cleanup(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) 	myrb_destroy_mempools(cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) static const struct pci_device_id myrb_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) 		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) 			       PCI_DEVICE_ID_DEC_21285,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) 			       PCI_VENDOR_ID_MYLEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) 			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) 		.driver_data	= (unsigned long) &DAC960_LA_privdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) 		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) 		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) 		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) 	{0, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) MODULE_DEVICE_TABLE(pci, myrb_id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) static struct pci_driver myrb_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) 	.name		= "myrb",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) 	.id_table	= myrb_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) 	.probe		= myrb_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) 	.remove		= myrb_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) static int __init myrb_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) 	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) 	if (!myrb_raid_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) 	ret = pci_register_driver(&myrb_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) 		raid_class_release(myrb_raid_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) static void __exit myrb_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) 	pci_unregister_driver(&myrb_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) 	raid_class_release(myrb_raid_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) module_init(myrb_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) module_exit(myrb_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659) MODULE_LICENSE("GPL");