^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * This driver supports the newer, SCSI-based firmware interface only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * Based on the original DAC960 driver, which has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * Portions Copyright 2002 by Mylex (An IBM Business Unit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <linux/raid_class.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #include <scsi/scsi_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #include "myrs.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27)
/* RAID transport template registered with the SCSI midlayer at module init */
static struct raid_template *myrs_raid_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
/*
 * Table mapping enum myrs_devstate values to human-readable names,
 * used when logging device state transitions.
 */
static struct myrs_devstate_name_entry {
	enum myrs_devstate state;
	char *name;
} myrs_devstate_name_list[] = {
	{ MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
	{ MYRS_DEVICE_ONLINE, "Online" },
	{ MYRS_DEVICE_REBUILD, "Rebuild" },
	{ MYRS_DEVICE_MISSING, "Missing" },
	{ MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
	{ MYRS_DEVICE_OFFLINE, "Offline" },
	{ MYRS_DEVICE_CRITICAL, "Critical" },
	{ MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
	{ MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
	{ MYRS_DEVICE_STANDBY, "Standby" },
	{ MYRS_DEVICE_INVALID_STATE, "Invalid" },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) static char *myrs_devstate_name(enum myrs_devstate state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) if (entry[i].state == state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) return entry[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58)
/*
 * Table mapping enum myrs_raid_level values to human-readable names,
 * reported via the raid_class transport attributes.
 */
static struct myrs_raid_level_name_entry {
	enum myrs_raid_level level;
	char *name;
} myrs_raid_level_name_list[] = {
	{ MYRS_RAID_LEVEL0, "RAID0" },
	{ MYRS_RAID_LEVEL1, "RAID1" },
	{ MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
	{ MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
	{ MYRS_RAID_LEVEL6, "RAID6" },
	{ MYRS_RAID_JBOD, "JBOD" },
	{ MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
	{ MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
	{ MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
	{ MYRS_RAID_SPAN, "Mylex SPAN" },
	{ MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
	{ MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
	{ MYRS_RAID_PHYSICAL, "Physical device" },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) static char *myrs_raid_level_name(enum myrs_raid_level level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) if (entry[i].level == level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return entry[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) memset(mbox, 0, sizeof(union myrs_cmd_mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) cmd_blk->status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) */
/*
 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 *
 * Copies the prepared mailbox into the next free slot of the driver's
 * in-memory command-mailbox ring, then advances the ring bookkeeping
 * (prev_cmd_mbox1/2 track the two most recently submitted slots;
 * next_cmd_mbox wraps from last_cmd_mbox back to first_cmd_mbox).
 *
 * NOTE(review): assumes the caller holds cs->queue_lock (myrs_exec_cmd
 * does) — confirm for any other caller.  The semantics of the
 * cs->get_cmd_mbox() hardware hook are defined by the per-controller
 * privdata in myrs.h; presumably it tells the controller to fetch new
 * mailboxes when one of the last two slots still reads as free
 * (words[0] == 0) — verify against the controller-specific
 * implementations.
 */
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	void __iomem *base = cs->io_base;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	cs->write_cmd_mbox(next_mbox, mbox);

	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(base);

	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;

	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;

	cs->next_cmd_mbox = next_mbox;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) * myrs_exec_cmd - executes V2 Command and waits for completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) static void myrs_exec_cmd(struct myrs_hba *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) struct myrs_cmdblk *cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) DECLARE_COMPLETION_ONSTACK(complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) cmd_blk->complete = &complete;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) spin_lock_irqsave(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) myrs_qcmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) spin_unlock_irqrestore(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) WARN_ON(in_interrupt());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) wait_for_completion(&complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) * myrs_report_progress - prints progress message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) unsigned char *msg, unsigned long blocks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) shost_printk(KERN_INFO, cs->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) "Logical Drive %d: %s in Progress: %d%% completed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) ldev_num, msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) (100 * (int)(blocks >> 7)) / (int)(size >> 7));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) */
/*
 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 *
 * Refreshes cs->ctlr_info from the controller via a streaming DMA
 * mapping.  On success, sets cs->needs_update when any background
 * operation (init, consistency check, rebuild, expansion) is active,
 * and logs a message when the logical-device counts changed since the
 * previous snapshot.  Returns the command status byte, or
 * MYRS_STATUS_FAILED if the DMA mapping could not be created.
 */
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ctlr_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;
	unsigned short ldev_present, ldev_critical, ldev_offline;

	/* Snapshot the previous counts so changes can be reported below */
	ldev_present = cs->ctlr_info->ldev_present;
	ldev_critical = cs->ctlr_info->ldev_critical;
	ldev_offline = cs->ctlr_info->ldev_offline;

	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
					sizeof(struct myrs_ctlr_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
		return MYRS_STATUS_FAILED;

	/* dcmd_mutex serializes use of the shared direct-command block */
	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ctlr_info.id = MYRS_DCMD_TAG;
	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ctlr_info.control.dma_ctrl_to_host = true;
	mbox->ctlr_info.control.no_autosense = true;
	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
	mbox->ctlr_info.ctlr_num = 0;
	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
	/* Single-entry scatter/gather list pointing at cs->ctlr_info */
	sgl = &mbox->ctlr_info.dma_addr;
	sgl->sge[0].sge_addr = ctlr_info_addr;
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	/* Unmap before reading the DMA'd data on the CPU side */
	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		/* Any active background operation warrants a later re-poll */
		if (cs->ctlr_info->bg_init_active +
		    cs->ctlr_info->ldev_init_active +
		    cs->ctlr_info->pdev_init_active +
		    cs->ctlr_info->cc_active +
		    cs->ctlr_info->rbld_active +
		    cs->ctlr_info->exp_active != 0)
			cs->needs_update = true;
		if (cs->ctlr_info->ldev_present != ldev_present ||
		    cs->ctlr_info->ldev_critical != ldev_critical ||
		    cs->ctlr_info->ldev_offline != ldev_offline)
			shost_printk(KERN_INFO, cs->host,
				     "Logical drive count changes (%d/%d/%d)\n",
				     cs->ctlr_info->ldev_critical,
				     cs->ctlr_info->ldev_offline,
				     cs->ctlr_info->ldev_present);
	}

	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) */
/*
 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 *
 * Refreshes @ldev_info for logical device @ldev_num via a streaming
 * DMA mapping.  A copy of the previous contents is kept so that, on
 * success, state transitions, new error counts, and finished
 * background initializations can be logged; active background
 * operations are reported through myrs_report_progress().  Returns the
 * command status byte, or MYRS_STATUS_FAILED if the DMA mapping could
 * not be created.
 */
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ldev_info_addr;
	struct myrs_ldev_info ldev_info_orig;
	union myrs_sgl *sgl;
	unsigned char status;

	/* Preserve the old snapshot for change detection after the command */
	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
					sizeof(struct myrs_ldev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
		return MYRS_STATUS_FAILED;

	/* dcmd_mutex serializes use of the shared direct-command block */
	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ldev_info.id = MYRS_DCMD_TAG;
	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ldev_info.control.dma_ctrl_to_host = true;
	mbox->ldev_info.control.no_autosense = true;
	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
	mbox->ldev_info.ldev.ldev_num = ldev_num;
	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
	/* Single-entry scatter/gather list pointing at @ldev_info */
	sgl = &mbox->ldev_info.dma_addr;
	sgl->sge[0].sge_addr = ldev_info_addr;
	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	/* Unmap before comparing the DMA'd data on the CPU side */
	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		unsigned short ldev_num = ldev_info->ldev_num;
		struct myrs_ldev_info *new = ldev_info;
		struct myrs_ldev_info *old = &ldev_info_orig;
		unsigned long ldev_size = new->cfg_devsize;

		/* Report device state transitions (Online, Critical, ...) */
		if (new->dev_state != old->dev_state) {
			const char *name;

			name = myrs_devstate_name(new->dev_state);
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d is now %s\n",
				     ldev_num, name ? name : "Invalid");
		}
		/* Report any change in the accumulated error counters */
		if ((new->soft_errs != old->soft_errs) ||
		    (new->cmds_failed != old->cmds_failed) ||
		    (new->deferred_write_errs != old->deferred_write_errs))
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
				     ldev_num, new->soft_errs,
				     new->cmds_failed,
				     new->deferred_write_errs);
		/* At most one background operation is reported per poll */
		if (new->bg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Background Initialization",
					     new->bg_init_lba, ldev_size);
		else if (new->fg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Foreground Initialization",
					     new->fg_init_lba, ldev_size);
		else if (new->migration_active)
			myrs_report_progress(cs, ldev_num,
					     "Data Migration",
					     new->migration_lba, ldev_size);
		else if (new->patrol_active)
			myrs_report_progress(cs, ldev_num,
					     "Patrol Operation",
					     new->patrol_lba, ldev_size);
		/* Background init just finished: report its final outcome */
		if (old->bg_init_active && !new->bg_init_active)
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d: Background Initialization %s\n",
				     ldev_num,
				     (new->ldev_control.ldev_init_done ?
				      "Completed" : "Failed"));
	}
	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) */
/*
 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
 *
 * Fetches information for the physical device addressed by
 * @channel/@target/@lun into @pdev_info via a streaming DMA mapping.
 * Returns the command status byte, or MYRS_STATUS_FAILED if the DMA
 * mapping could not be created.
 */
static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_pdev_info *pdev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t pdev_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;

	pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
					sizeof(struct myrs_pdev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
		return MYRS_STATUS_FAILED;

	/* dcmd_mutex serializes use of the shared direct-command block */
	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.id = MYRS_DCMD_TAG;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
	/* Single-entry scatter/gather list pointing at @pdev_info */
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = pdev_info_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
		channel, target, lun);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
			 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
	return status;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) * myrs_dev_op - executes a "Device Operation" Command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) static unsigned char myrs_dev_op(struct myrs_hba *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) mutex_lock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) myrs_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) mbox->dev_op.id = MYRS_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) mbox->dev_op.control.dma_ctrl_to_host = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) mbox->dev_op.control.no_autosense = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) mbox->dev_op.ioctl_opcode = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) mbox->dev_op.opdev = opdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) myrs_exec_cmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) mutex_unlock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) * myrs_translate_pdev - translates a Physical Device Channel and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) * TargetID into a Logical Device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) unsigned char channel, unsigned char target, unsigned char lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) struct myrs_devmap *devmap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) struct pci_dev *pdev = cs->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) dma_addr_t devmap_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) struct myrs_cmdblk *cmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) union myrs_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) union myrs_sgl *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) memset(devmap, 0x0, sizeof(struct myrs_devmap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) devmap_addr = dma_map_single(&pdev->dev, devmap,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) sizeof(struct myrs_devmap),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) if (dma_mapping_error(&pdev->dev, devmap_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) return MYRS_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) mutex_lock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) mbox->pdev_info.control.dma_ctrl_to_host = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) mbox->pdev_info.control.no_autosense = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) mbox->pdev_info.pdev.target = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) mbox->pdev_info.pdev.channel = channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403) mbox->pdev_info.pdev.lun = lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) sgl = &mbox->pdev_info.dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) sgl->sge[0].sge_addr = devmap_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) myrs_exec_cmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) mutex_unlock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) dma_unmap_single(&pdev->dev, devmap_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) * myrs_get_event - executes a Get Event Command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) static unsigned char myrs_get_event(struct myrs_hba *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) unsigned int event_num, struct myrs_event *event_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) struct pci_dev *pdev = cs->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) dma_addr_t event_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) union myrs_sgl *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) event_addr = dma_map_single(&pdev->dev, event_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) sizeof(struct myrs_event), DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) if (dma_mapping_error(&pdev->dev, event_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) return MYRS_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) mbox->get_event.dma_size = sizeof(struct myrs_event);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) mbox->get_event.evnum_upper = event_num >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) mbox->get_event.ctlr_num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) mbox->get_event.evnum_lower = event_num & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) sgl = &mbox->get_event.dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) sgl->sge[0].sge_addr = event_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) sgl->sge[0].sge_count = mbox->get_event.dma_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) myrs_exec_cmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) dma_unmap_single(&pdev->dev, event_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) sizeof(struct myrs_event), DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * myrs_get_fwstatus - executes a Get Health Status Command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) union myrs_sgl *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) unsigned char status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) myrs_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) mbox->common.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) mbox->common.id = MYRS_MCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) mbox->common.control.dma_ctrl_to_host = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) mbox->common.control.no_autosense = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) mbox->common.dma_size = sizeof(struct myrs_fwstat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) sgl = &mbox->common.dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) sgl->sge[0].sge_addr = cs->fwstat_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) myrs_exec_cmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) enable_mbox_t enable_mbox_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) void __iomem *base = cs->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) struct pci_dev *pdev = cs->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) union myrs_cmd_mbox *cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) struct myrs_stat_mbox *stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) union myrs_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) dma_addr_t mbox_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) unsigned char status = MYRS_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) dev_err(&pdev->dev, "DMA mask out of range\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) /* Temporary dma mapping, used only in the scope of this function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) &mbox_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) if (dma_mapping_error(&pdev->dev, mbox_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) /* These are the base addresses for the command memory mailbox array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) &cs->cmd_mbox_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) dev_err(&pdev->dev, "Failed to map command mailbox\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) cs->first_cmd_mbox = cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) cs->last_cmd_mbox = cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) cs->next_cmd_mbox = cs->first_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) /* These are the base addresses for the status memory mailbox array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) &cs->stat_mbox_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) dev_err(&pdev->dev, "Failed to map status mailbox\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) cs->first_stat_mbox = stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) stat_mbox += MYRS_MAX_STAT_MBOX - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) cs->last_stat_mbox = stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) cs->next_stat_mbox = cs->first_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) sizeof(struct myrs_fwstat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) &cs->fwstat_addr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) cs->fwstat_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) if (!cs->ctlr_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) cs->event_buf = kzalloc(sizeof(struct myrs_event),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) GFP_KERNEL | GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) if (!cs->event_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) /* Enable the Memory Mailbox Interface. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) memset(mbox, 0, sizeof(union myrs_cmd_mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) mbox->set_mbox.id = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) mbox->set_mbox.control.no_autosense = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) mbox->set_mbox.first_cmd_mbox_size_kb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) mbox->set_mbox.first_stat_mbox_size_kb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) mbox->set_mbox.second_cmd_mbox_size_kb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) mbox->set_mbox.second_stat_mbox_size_kb = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) mbox->set_mbox.sense_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) mbox->set_mbox.fwstat_buf_size_kb = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) status = enable_mbox_fn(base, mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) mbox, mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) if (status != MYRS_STATUS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) return (status == MYRS_STATUS_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) * myrs_get_config - reads the Configuration Information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) static int myrs_get_config(struct myrs_hba *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) struct myrs_ctlr_info *info = cs->ctlr_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) struct Scsi_Host *shost = cs->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) unsigned char model[20];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) unsigned char fw_version[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) int i, model_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /* Get data into dma-able area, then copy into permanent location */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) mutex_lock(&cs->cinfo_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) status = myrs_get_ctlr_info(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) mutex_unlock(&cs->cinfo_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) if (status != MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) shost_printk(KERN_ERR, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) "Failed to get controller information\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) /* Initialize the Controller Model Name and Full Model Name fields. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) model_len = sizeof(info->ctlr_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) if (model_len > sizeof(model)-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) model_len = sizeof(model)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) memcpy(model, info->ctlr_name, model_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) model_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) while (model[model_len] == ' ' || model[model_len] == '\0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) model_len--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) model[++model_len] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) strcpy(cs->model_name, "DAC960 ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) strcat(cs->model_name, model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) /* Initialize the Controller Firmware Version field. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) sprintf(fw_version, "%d.%02d-%02d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) info->fw_major_version, info->fw_minor_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) info->fw_turn_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) if (info->fw_major_version == 6 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) info->fw_minor_version == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) info->fw_turn_number < 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) shost_printk(KERN_WARNING, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) /* Initialize the Controller Channels and Targets. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) shost->max_channel = info->physchan_present + info->virtchan_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) shost->max_id = info->max_targets[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) for (i = 1; i < 16; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) if (!info->max_targets[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) if (shost->max_id < info->max_targets[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) shost->max_id = info->max_targets[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * Initialize the Controller Queue Depth, Driver Queue Depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * Logical Drive Count, Maximum Blocks per Command, Controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * The Driver Queue Depth must be at most three less than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * the Controller Queue Depth; tag '1' is reserved for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * direct commands, and tag '2' for monitoring commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) shost->can_queue = info->max_tcq - 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) shost->max_sectors = info->max_transfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) shost->sg_tablesize = info->max_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) if (shost->sg_tablesize > MYRS_SG_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) shost->sg_tablesize = MYRS_SG_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) "Configuring %s PCI RAID Controller\n", model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) fw_version, info->physchan_present, info->mem_size_mb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) shost->can_queue, shost->max_sectors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) for (i = 0; i < info->physchan_max; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) if (!info->max_targets[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) " Device Channel %d: max %d devices\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) i, info->max_targets[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) " Physical: %d/%d channels, %d disks, %d devices\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) info->physchan_present, info->physchan_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) info->pdisk_present, info->pdev_present);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) " Logical: %d/%d channels, %d disks\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) info->virtchan_present, info->virtchan_max,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) info->ldev_present);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * myrs_log_event - prints a Controller Event message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) */
/*
 * Table mapping controller event codes to human-readable messages,
 * searched linearly by myrs_log_event(). Entries are grouped by the
 * event-code ranges noted in the section comments below. The leading
 * letter of each message appears to select a message class/prefix
 * (e.g. 'P' physical device, 'L' logical drive) decoded by the logging
 * code -- NOTE(review): the exact per-letter formatting is handled
 * outside this table; confirm against myrs_log_event(). Some 'E'
 * messages contain a %d that is filled in with a unit number.
 * The { 0, "" } entry terminates the table.
 */
static struct {
	int ev_code;		/* controller event code to match */
	unsigned char *ev_msg;	/* message text, first char = class letter */
} myrs_ev_list[] = {
	/* Physical Device Events (0x0000 - 0x007F) */
	{ 0x0001, "P Online" },
	{ 0x0002, "P Standby" },
	{ 0x0005, "P Automatic Rebuild Started" },
	{ 0x0006, "P Manual Rebuild Started" },
	{ 0x0007, "P Rebuild Completed" },
	{ 0x0008, "P Rebuild Cancelled" },
	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
	{ 0x000C, "S Offline" },
	{ 0x000D, "P Found" },
	{ 0x000E, "P Removed" },
	{ 0x000F, "P Unconfigured" },
	{ 0x0010, "P Expand Capacity Started" },
	{ 0x0011, "P Expand Capacity Completed" },
	{ 0x0012, "P Expand Capacity Failed" },
	{ 0x0013, "P Command Timed Out" },
	{ 0x0014, "P Command Aborted" },
	{ 0x0015, "P Command Retried" },
	{ 0x0016, "P Parity Error" },
	{ 0x0017, "P Soft Error" },
	{ 0x0018, "P Miscellaneous Error" },
	{ 0x0019, "P Reset" },
	{ 0x001A, "P Active Spare Found" },
	{ 0x001B, "P Warm Spare Found" },
	{ 0x001C, "S Sense Data Received" },
	{ 0x001D, "P Initialization Started" },
	{ 0x001E, "P Initialization Completed" },
	{ 0x001F, "P Initialization Failed" },
	{ 0x0020, "P Initialization Cancelled" },
	{ 0x0021, "P Failed because Write Recovery Failed" },
	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
	{ 0x0023, "P Failed because of Double Check Condition" },
	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
	{ 0x0026, "P Failed because of Bad Tag from Device" },
	{ 0x0027, "P Failed because of Command Timeout" },
	{ 0x0028, "P Failed because of System Reset" },
	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
	{ 0x002A, "P Failed because Host Set Device to Failed State" },
	{ 0x002B, "P Failed because of Selection Timeout" },
	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
	{ 0x002D, "P Failed because Device Returned Unknown Status" },
	{ 0x002E, "P Failed because Device Not Ready" },
	{ 0x002F, "P Failed because Device Not Found at Startup" },
	{ 0x0030, "P Failed because COD Write Operation Failed" },
	{ 0x0031, "P Failed because BDT Write Operation Failed" },
	{ 0x0039, "P Missing at Startup" },
	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
	{ 0x003D, "P Standby Rebuild Started" },
	/* Logical Device Events (0x0080 - 0x00FF) */
	{ 0x0080, "M Consistency Check Started" },
	{ 0x0081, "M Consistency Check Completed" },
	{ 0x0082, "M Consistency Check Cancelled" },
	{ 0x0083, "M Consistency Check Completed With Errors" },
	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
	{ 0x0086, "L Offline" },
	{ 0x0087, "L Critical" },
	{ 0x0088, "L Online" },
	{ 0x0089, "M Automatic Rebuild Started" },
	{ 0x008A, "M Manual Rebuild Started" },
	{ 0x008B, "M Rebuild Completed" },
	{ 0x008C, "M Rebuild Cancelled" },
	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
	{ 0x0090, "M Initialization Started" },
	{ 0x0091, "M Initialization Completed" },
	{ 0x0092, "M Initialization Cancelled" },
	{ 0x0093, "M Initialization Failed" },
	{ 0x0094, "L Found" },
	{ 0x0095, "L Deleted" },
	{ 0x0096, "M Expand Capacity Started" },
	{ 0x0097, "M Expand Capacity Completed" },
	{ 0x0098, "M Expand Capacity Failed" },
	{ 0x0099, "L Bad Block Found" },
	{ 0x009A, "L Size Changed" },
	{ 0x009B, "L Type Changed" },
	{ 0x009C, "L Bad Data Block Found" },
	{ 0x009E, "L Read of Data Block in BDT" },
	{ 0x009F, "L Write Back Data for Disk Block Lost" },
	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
	{ 0x00A2, "L Standby Rebuild Started" },
	/* Fault Management Events (0x0100 - 0x017F) */
	{ 0x0140, "E Fan %d Failed" },
	{ 0x0141, "E Fan %d OK" },
	{ 0x0142, "E Fan %d Not Present" },
	{ 0x0143, "E Power Supply %d Failed" },
	{ 0x0144, "E Power Supply %d OK" },
	{ 0x0145, "E Power Supply %d Not Present" },
	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
	{ 0x0149, "E Temperature Sensor %d Not Present" },
	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
	/* Controller Events (0x0180 - 0x01FF) */
	{ 0x0181, "C Cache Write Back Error" },
	{ 0x0188, "C Battery Backup Unit Found" },
	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
	{ 0x0193, "C Installation Aborted" },
	{ 0x0195, "C Battery Backup Unit Physically Removed" },
	{ 0x0196, "C Memory Error During Warm Boot" },
	{ 0x019E, "C Memory Soft ECC Error Corrected" },
	{ 0x019F, "C Memory Hard ECC Error Corrected" },
	{ 0x01A2, "C Battery Backup Unit Failed" },
	{ 0x01AB, "C Mirror Race Recovery Failed" },
	{ 0x01AC, "C Mirror Race on Critical Drive" },
	/* Controller Internal Processor Events */
	{ 0x0380, "C Internal Controller Hung" },
	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
	{ 0, "" }	/* sentinel: end of table */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) int ev_idx = 0, ev_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) unsigned char ev_type, *ev_msg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) struct Scsi_Host *shost = cs->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) struct scsi_sense_hdr sshdr = {0};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) unsigned char sense_info[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) unsigned char cmd_specific[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) if (ev->ev_code == 0x1C) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) memset(&sshdr, 0x0, sizeof(sshdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) memset(sense_info, 0x0, sizeof(sense_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) memset(cmd_specific, 0x0, sizeof(cmd_specific));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) memcpy(sense_info, &ev->sense_data[3], 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) memcpy(cmd_specific, &ev->sense_data[7], 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) if (sshdr.sense_key == VENDOR_SPECIFIC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) (sshdr.asc == 0x80 || sshdr.asc == 0x81))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) while (true) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) ev_code = myrs_ev_list[ev_idx].ev_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) if (ev_code == ev->ev_code || ev_code == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ev_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) ev_type = myrs_ev_list[ev_idx].ev_msg[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (ev_code == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) shost_printk(KERN_WARNING, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) "Unknown Controller Event Code %04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) ev->ev_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) switch (ev_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) case 'P':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) sdev = scsi_device_lookup(shost, ev->channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) ev->target, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) ev->ev_seq, ev_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (sdev && sdev->hostdata &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) sdev->channel < cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) struct myrs_pdev_info *pdev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) switch (ev->ev_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) case 0x0001:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) case 0x0007:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) pdev_info->dev_state = MYRS_DEVICE_ONLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) case 0x0002:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) pdev_info->dev_state = MYRS_DEVICE_STANDBY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) case 0x000C:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) case 0x000E:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) pdev_info->dev_state = MYRS_DEVICE_MISSING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) case 0x000F:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) case 'L':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) "event %d: Logical Drive %d %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) ev->ev_seq, ev->lun, ev_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) cs->needs_update = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) case 'M':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) "event %d: Logical Drive %d %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) ev->ev_seq, ev->lun, ev_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) cs->needs_update = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) case 'S':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) if (sshdr.sense_key == NO_SENSE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) (sshdr.sense_key == NOT_READY &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) sshdr.ascq == 0x02)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) "event %d: Physical Device %d:%d %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) ev->ev_seq, ev->channel, ev->target, ev_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) ev->channel, ev->target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) sshdr.sense_key, sshdr.asc, sshdr.ascq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) ev->channel, ev->target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) sense_info[0], sense_info[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) sense_info[2], sense_info[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) cmd_specific[0], cmd_specific[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) cmd_specific[2], cmd_specific[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) case 'E':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) if (cs->disable_enc_msg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) sprintf(msg_buf, ev_msg, ev->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) ev->ev_seq, ev->target, msg_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) case 'C':
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) ev->ev_seq, ev_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) "event %d: Unknown Event Code %04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) ev->ev_seq, ev->ev_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * SCSI sysfs interface functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) static ssize_t raid_state_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (!sdev->hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) return snprintf(buf, 16, "Unknown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) if (sdev->channel >= cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) struct myrs_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) name = myrs_devstate_name(ldev_info->dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) if (name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) ret = snprintf(buf, 32, "%s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) ret = snprintf(buf, 32, "Invalid (%02X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) ldev_info->dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) struct myrs_pdev_info *pdev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) pdev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) name = myrs_devstate_name(pdev_info->dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) if (name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ret = snprintf(buf, 32, "%s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) ret = snprintf(buf, 32, "Invalid (%02X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) pdev_info->dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) static ssize_t raid_state_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) struct myrs_cmdblk *cmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) union myrs_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) enum myrs_devstate new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) unsigned short ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) if (!strncmp(buf, "offline", 7) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) !strncmp(buf, "kill", 4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) new_state = MYRS_DEVICE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) else if (!strncmp(buf, "online", 6))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) new_state = MYRS_DEVICE_ONLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) else if (!strncmp(buf, "standby", 7))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) new_state = MYRS_DEVICE_STANDBY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) if (sdev->channel < cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) struct myrs_pdev_info *pdev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) struct myrs_devmap *pdev_devmap =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) (struct myrs_devmap *)&pdev_info->rsvd13;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (pdev_info->dev_state == new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) "Device already in %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) myrs_devstate_name(new_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) sdev->lun, pdev_devmap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) if (status != MYRS_STATUS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) ldev_num = pdev_devmap->ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) struct myrs_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) if (ldev_info->dev_state == new_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) "Device already in %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) myrs_devstate_name(new_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ldev_num = ldev_info->ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) mutex_lock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) myrs_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) mbox->common.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) mbox->common.id = MYRS_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) mbox->common.control.dma_ctrl_to_host = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) mbox->common.control.no_autosense = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) mbox->set_devstate.state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) mbox->set_devstate.ldev.ldev_num = ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) myrs_exec_cmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) mutex_unlock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) if (status == MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) if (sdev->channel < cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) struct myrs_pdev_info *pdev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) pdev_info->dev_state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) struct myrs_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) ldev_info->dev_state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) "Set device state to %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) myrs_devstate_name(new_state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) "Failed to set device state to %s, status 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) myrs_devstate_name(new_state), status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) static DEVICE_ATTR_RW(raid_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static ssize_t raid_level_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) const char *name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) if (!sdev->hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) return snprintf(buf, 16, "Unknown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) if (sdev->channel >= cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) struct myrs_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) name = myrs_raid_level_name(ldev_info->raid_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (!name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return snprintf(buf, 32, "Invalid (%02X)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) ldev_info->dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return snprintf(buf, 32, "%s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static DEVICE_ATTR_RO(raid_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) static ssize_t rebuild_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) struct myrs_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) unsigned short ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) if (sdev->channel < cs->ctlr_info->physchan_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) return snprintf(buf, 32, "physical device - not rebuilding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) ldev_num = ldev_info->ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (status != MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) "Failed to get device information, status 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) if (ldev_info->rbld_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) (size_t)ldev_info->rbld_lba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) (size_t)ldev_info->cfg_devsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) return snprintf(buf, 32, "not rebuilding\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) static ssize_t rebuild_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) struct myrs_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) struct myrs_cmdblk *cmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) union myrs_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) unsigned short ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) int rebuild, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (sdev->channel < cs->ctlr_info->physchan_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) ldev_num = ldev_info->ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) ret = kstrtoint(buf, 0, &rebuild);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (status != MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) "Failed to get device information, status 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) if (rebuild && ldev_info->rbld_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) "Rebuild Not Initiated; already in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) if (!rebuild && !ldev_info->rbld_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) "Rebuild Not Cancelled; no rebuild in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) mutex_lock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) myrs_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) mbox->common.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) mbox->common.id = MYRS_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) mbox->common.control.dma_ctrl_to_host = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) mbox->common.control.no_autosense = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) if (rebuild) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) mbox->ldev_info.ldev.ldev_num = ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) mbox->ldev_info.ldev.ldev_num = ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) myrs_exec_cmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) mutex_unlock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) "Rebuild Not %s, status 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) rebuild ? "Initiated" : "Cancelled", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) rebuild ? "Initiated" : "Cancelled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) ret = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static DEVICE_ATTR_RW(rebuild);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static ssize_t consistency_check_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) struct myrs_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) unsigned short ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) if (sdev->channel < cs->ctlr_info->physchan_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) return snprintf(buf, 32, "physical device - not checking\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) ldev_num = ldev_info->ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) if (ldev_info->cc_active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) return snprintf(buf, 32, "checking block %zu of %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) (size_t)ldev_info->cc_lba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) (size_t)ldev_info->cfg_devsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) return snprintf(buf, 32, "not checking\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) static ssize_t consistency_check_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct myrs_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) struct myrs_cmdblk *cmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) union myrs_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) unsigned short ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) int check, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) if (sdev->channel < cs->ctlr_info->physchan_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) ldev_num = ldev_info->ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) ret = kstrtoint(buf, 0, &check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) if (status != MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) "Failed to get device information, status 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) if (check && ldev_info->cc_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) "Consistency Check Not Initiated; "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) "already in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) return -EALREADY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) if (!check && !ldev_info->cc_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) "Consistency Check Not Cancelled; "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) "check not in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) mutex_lock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) myrs_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) mbox->common.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) mbox->common.id = MYRS_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) mbox->common.control.dma_ctrl_to_host = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) mbox->common.control.no_autosense = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) if (check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) mbox->cc.ldev.ldev_num = ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) mbox->cc.restore_consistency = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) mbox->cc.initialized_area_only = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) mbox->cc.ldev.ldev_num = ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) myrs_exec_cmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) mutex_unlock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) if (status != MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) sdev_printk(KERN_INFO, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) "Consistency Check Not %s, status 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) check ? "Initiated" : "Cancelled", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) check ? "Initiated" : "Cancelled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) ret = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static DEVICE_ATTR_RW(consistency_check);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) static struct device_attribute *myrs_sdev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) &dev_attr_consistency_check,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) &dev_attr_rebuild,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) &dev_attr_raid_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) &dev_attr_raid_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static ssize_t serial_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) char serial[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) memcpy(serial, cs->ctlr_info->serial_number, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) serial[16] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) return snprintf(buf, 16, "%s\n", serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) static DEVICE_ATTR_RO(serial);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) static ssize_t ctlr_num_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) return snprintf(buf, 20, "%d\n", cs->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) static DEVICE_ATTR_RO(ctlr_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) static struct myrs_cpu_type_tbl {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) enum myrs_cpu_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) } myrs_cpu_type_names[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) { MYRS_CPUTYPE_i960CA, "i960CA" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) { MYRS_CPUTYPE_i960RD, "i960RD" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) { MYRS_CPUTYPE_i960RN, "i960RN" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) { MYRS_CPUTYPE_i960RP, "i960RP" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) { MYRS_CPUTYPE_NorthBay, "NorthBay" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) { MYRS_CPUTYPE_StrongArm, "StrongARM" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) { MYRS_CPUTYPE_i960RM, "i960RM" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static ssize_t processor_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) struct myrs_cpu_type_tbl *tbl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) const char *first_processor = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) const char *second_processor = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) struct myrs_ctlr_info *info = cs->ctlr_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) if (info->cpu[0].cpu_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) tbl = myrs_cpu_type_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (tbl[i].type == info->cpu[0].cpu_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) first_processor = tbl[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) if (info->cpu[1].cpu_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) tbl = myrs_cpu_type_names;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) if (tbl[i].type == info->cpu[1].cpu_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) second_processor = tbl[i].name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (first_processor && second_processor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) "2: %s (%s, %d cpus)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) info->cpu[0].cpu_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) first_processor, info->cpu[0].cpu_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) info->cpu[1].cpu_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) second_processor, info->cpu[1].cpu_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) else if (first_processor && !second_processor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) info->cpu[0].cpu_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) first_processor, info->cpu[0].cpu_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) else if (!first_processor && second_processor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) info->cpu[1].cpu_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) second_processor, info->cpu[1].cpu_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) ret = snprintf(buf, 64, "1: absent\n2: absent\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) static DEVICE_ATTR_RO(processor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static ssize_t model_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return snprintf(buf, 28, "%s\n", cs->model_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) static DEVICE_ATTR_RO(model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) static ssize_t ctlr_type_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) static DEVICE_ATTR_RO(ctlr_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) static ssize_t cache_size_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) static DEVICE_ATTR_RO(cache_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static ssize_t firmware_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) return snprintf(buf, 16, "%d.%02d-%02d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) cs->ctlr_info->fw_major_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) cs->ctlr_info->fw_minor_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) cs->ctlr_info->fw_turn_number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) static DEVICE_ATTR_RO(firmware);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static ssize_t discovery_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) struct myrs_cmdblk *cmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) union myrs_cmd_mbox *mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) mutex_lock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) myrs_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) mbox->common.opcode = MYRS_CMD_OP_IOCTL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) mbox->common.id = MYRS_DCMD_TAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) mbox->common.control.dma_ctrl_to_host = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) mbox->common.control.no_autosense = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) myrs_exec_cmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) mutex_unlock(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (status != MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) "Discovery Not Initiated, status %02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) cs->next_evseq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) cs->needs_update = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) flush_delayed_work(&cs->monitor_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) shost_printk(KERN_INFO, shost, "Discovery Completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) static DEVICE_ATTR_WO(discovery);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static ssize_t flush_cache_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) MYRS_RAID_CONTROLLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) if (status == MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) shost_printk(KERN_INFO, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) "Cache Flush failed, status 0x%02x\n", status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) static DEVICE_ATTR_WO(flush_cache);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static ssize_t disable_enclosure_messages_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) static ssize_t disable_enclosure_messages_store(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) struct device_attribute *attr, const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) int value, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ret = kstrtoint(buf, 0, &value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) if (value > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) cs->disable_enc_msg = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) static DEVICE_ATTR_RW(disable_enclosure_messages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) static struct device_attribute *myrs_shost_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) &dev_attr_serial,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) &dev_attr_ctlr_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) &dev_attr_processor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) &dev_attr_model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) &dev_attr_ctlr_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) &dev_attr_cache_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) &dev_attr_firmware,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) &dev_attr_discovery,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) &dev_attr_flush_cache,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) &dev_attr_disable_enclosure_messages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * SCSI midlayer interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) static int myrs_host_reset(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) struct Scsi_Host *shost = scmd->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) cs->reset(cs->io_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) struct myrs_ldev_info *ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) unsigned char modes[32], *mode_pg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) bool dbd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) size_t mode_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) dbd = (scmd->cmnd[1] & 0x08) == 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) if (dbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) mode_len = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) mode_pg = &modes[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) mode_len = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) mode_pg = &modes[12];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) memset(modes, 0, sizeof(modes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) modes[0] = mode_len - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) modes[2] = 0x10; /* Enable FUA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) modes[2] |= 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) if (!dbd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) unsigned char *block_desc = &modes[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) modes[3] = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) mode_pg[0] = 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) mode_pg[1] = 0x12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) mode_pg[2] |= 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) mode_pg[2] |= 0x04;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) if (ldev_info->cacheline_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) mode_pg[2] |= 0x08;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) put_unaligned_be16(1 << ldev_info->cacheline_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) &mode_pg[14]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) scsi_sg_copy_from_buffer(scmd, modes, mode_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) static int myrs_queuecommand(struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) struct myrs_hba *cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) struct scsi_device *sdev = scmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) union myrs_sgl *hw_sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) dma_addr_t sense_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) struct scatterlist *sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) unsigned long flags, timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) int nsge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) if (!scmd->device->hostdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) scmd->result = (DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) switch (scmd->cmnd[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) case REPORT_LUNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 0x20, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) case MODE_SENSE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct myrs_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) (scmd->cmnd[2] & 0x3F) != 0x08) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) /* Illegal request, invalid field in CDB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) scsi_build_sense_buffer(0, scmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) ILLEGAL_REQUEST, 0x24, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) scmd->result = (DRIVER_SENSE << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) myrs_mode_sense(cs, scmd, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) scmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) myrs_reset_cmd(cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) &sense_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) if (!cmd_blk->sense)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) cmd_blk->sense_addr = sense_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) timeout = scmd->request->timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) if (scmd->cmd_len <= 10) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) struct myrs_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) mbox->SCSI_10.pdev.lun = ldev_info->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) mbox->SCSI_10.pdev.target = ldev_info->target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) mbox->SCSI_10.pdev.channel = ldev_info->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) mbox->SCSI_10.pdev.ctlr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) mbox->SCSI_10.pdev.lun = sdev->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) mbox->SCSI_10.pdev.target = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) mbox->SCSI_10.pdev.channel = sdev->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) mbox->SCSI_10.id = scmd->request->tag + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) mbox->SCSI_10.control.dma_ctrl_to_host =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) (scmd->sc_data_direction == DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) if (scmd->request->cmd_flags & REQ_FUA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) mbox->SCSI_10.control.fua = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) mbox->SCSI_10.cdb_len = scmd->cmd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) if (timeout > 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) mbox->SCSI_10.tmo.tmo_val = timeout / 60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) mbox->SCSI_10.tmo.tmo_val = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) hw_sge = &mbox->SCSI_10.dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) cmd_blk->dcdb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) dma_addr_t dcdb_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) &dcdb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) if (!cmd_blk->dcdb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) dma_pool_free(cs->sense_pool, cmd_blk->sense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) cmd_blk->sense_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) cmd_blk->sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) cmd_blk->sense_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) cmd_blk->dcdb_dma = dcdb_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) struct myrs_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) mbox->SCSI_255.pdev.lun = ldev_info->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) mbox->SCSI_255.pdev.target = ldev_info->target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) mbox->SCSI_255.pdev.channel = ldev_info->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) mbox->SCSI_255.pdev.ctlr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) mbox->SCSI_255.pdev.lun = sdev->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) mbox->SCSI_255.pdev.target = sdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) mbox->SCSI_255.pdev.channel = sdev->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) mbox->SCSI_255.id = scmd->request->tag + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) mbox->SCSI_255.control.dma_ctrl_to_host =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) (scmd->sc_data_direction == DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) if (scmd->request->cmd_flags & REQ_FUA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) mbox->SCSI_255.control.fua = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) mbox->SCSI_255.cdb_len = scmd->cmd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (timeout > 60) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) mbox->SCSI_255.tmo.tmo_val = timeout / 60;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) mbox->SCSI_255.tmo.tmo_val = timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) hw_sge = &mbox->SCSI_255.dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (scmd->sc_data_direction == DMA_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) goto submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) nsge = scsi_dma_map(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) if (nsge == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) sgl = scsi_sglist(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) struct myrs_sge *hw_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) dma_addr_t hw_sgl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) if (nsge > 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) &hw_sgl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) if (WARN_ON(!hw_sgl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) if (cmd_blk->dcdb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) dma_pool_free(cs->dcdb_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) cmd_blk->dcdb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) cmd_blk->dcdb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) cmd_blk->dcdb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) cmd_blk->dcdb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) dma_pool_free(cs->sense_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) cmd_blk->sense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) cmd_blk->sense_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) cmd_blk->sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) cmd_blk->sense_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) cmd_blk->sgl = hw_sgl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) cmd_blk->sgl_addr = hw_sgl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) if (scmd->cmd_len <= 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) mbox->SCSI_10.control.add_sge_mem = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) mbox->SCSI_255.control.add_sge_mem = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) hw_sge->ext.sge0_len = nsge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) hw_sgl = hw_sge->sge;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) scsi_for_each_sg(scmd, sgl, nsge, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) if (WARN_ON(!hw_sgl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) scsi_dma_unmap(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) scmd->result = (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) hw_sgl->sge_count = (u64)sg_dma_len(sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) hw_sgl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) submit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) spin_lock_irqsave(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) myrs_qcmd(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) spin_unlock_irqrestore(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) unsigned short ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) unsigned int chan_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) sdev->channel - cs->ctlr_info->physchan_present;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) ldev_num = sdev->id + chan_offset * sdev->host->max_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) return ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static int myrs_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) if (sdev->channel > sdev->host->max_channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (sdev->channel >= cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) struct myrs_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) unsigned short ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) if (sdev->lun > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) ldev_num = myrs_translate_ldev(cs, sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (status != MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) sdev->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) kfree(ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) enum raid_level level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) dev_dbg(&sdev->sdev_gendev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) "Logical device mapping %d:%d:%d -> %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) ldev_info->channel, ldev_info->target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) ldev_info->lun, ldev_info->ldev_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) sdev->hostdata = ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) switch (ldev_info->raid_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) case MYRS_RAID_LEVEL0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) level = RAID_LEVEL_LINEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) case MYRS_RAID_LEVEL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) level = RAID_LEVEL_1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) case MYRS_RAID_LEVEL3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) case MYRS_RAID_LEVEL3F:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) case MYRS_RAID_LEVEL3L:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) level = RAID_LEVEL_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) case MYRS_RAID_LEVEL5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) case MYRS_RAID_LEVEL5L:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) level = RAID_LEVEL_5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) case MYRS_RAID_LEVEL6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) level = RAID_LEVEL_6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) case MYRS_RAID_LEVELE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) case MYRS_RAID_NEWSPAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) case MYRS_RAID_SPAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) level = RAID_LEVEL_LINEAR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) case MYRS_RAID_JBOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) level = RAID_LEVEL_JBOD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) level = RAID_LEVEL_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) raid_set_level(myrs_raid_template,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) &sdev->sdev_gendev, level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) const char *name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) name = myrs_devstate_name(ldev_info->dev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) sdev_printk(KERN_DEBUG, sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) "logical device in state %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) name ? name : "Invalid");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) struct myrs_pdev_info *pdev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) if (!pdev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) status = myrs_get_pdev_info(cs, sdev->channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) sdev->id, sdev->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) pdev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) if (status != MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) sdev->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) kfree(pdev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) sdev->hostdata = pdev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) static int myrs_slave_configure(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) struct myrs_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) if (sdev->channel > sdev->host->max_channel)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) if (sdev->channel < cs->ctlr_info->physchan_present) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /* Skip HBA device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (sdev->type == TYPE_RAID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) sdev->no_uld_attach = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) if (sdev->lun != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) sdev->wce_default_on = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) sdev->tagged_supported = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) static void myrs_slave_destroy(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) kfree(sdev->hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) static struct scsi_host_template myrs_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) .name = "DAC960",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) .proc_name = "myrs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) .queuecommand = myrs_queuecommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) .eh_host_reset_handler = myrs_host_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) .slave_alloc = myrs_slave_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) .slave_configure = myrs_slave_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) .slave_destroy = myrs_slave_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) .cmd_size = sizeof(struct myrs_cmdblk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) .shost_attrs = myrs_shost_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) .sdev_attrs = myrs_sdev_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) .this_id = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) const struct pci_device_id *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) struct myrs_hba *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) if (!shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) shost->max_cmd_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) shost->max_lun = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) cs = shost_priv(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) mutex_init(&cs->dcmd_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) mutex_init(&cs->cinfo_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) cs->host = shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) return cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) * RAID template functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * myrs_is_raid - return boolean indicating device is raid volume
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) * @dev the device struct object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) myrs_is_raid(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * myrs_get_resync - get raid volume resync percent complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * @dev the device struct object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) myrs_get_resync(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) struct myrs_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) u64 percent_complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) if (ldev_info->rbld_active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) unsigned short ldev_num = ldev_info->ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) percent_complete = ldev_info->rbld_lba * 100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) do_div(percent_complete, ldev_info->cfg_devsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) raid_set_resync(myrs_raid_template, dev, percent_complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) * myrs_get_state - get raid volume status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) * @dev the device struct object
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) myrs_get_state(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) struct myrs_hba *cs = shost_priv(sdev->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) struct myrs_ldev_info *ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) enum raid_state state = RAID_STATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) state = RAID_STATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) switch (ldev_info->dev_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) case MYRS_DEVICE_ONLINE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) state = RAID_STATE_ACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) case MYRS_DEVICE_SUSPECTED_CRITICAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) case MYRS_DEVICE_CRITICAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) state = RAID_STATE_DEGRADED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) case MYRS_DEVICE_REBUILD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) state = RAID_STATE_RESYNCING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) case MYRS_DEVICE_UNCONFIGURED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) case MYRS_DEVICE_INVALID_STATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) state = RAID_STATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) state = RAID_STATE_OFFLINE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) raid_set_state(myrs_raid_template, dev, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) static struct raid_function_template myrs_raid_functions = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) .cookie = &myrs_template,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) .is_raid = myrs_is_raid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) .get_resync = myrs_get_resync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) .get_state = myrs_get_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * PCI interface functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) static void myrs_flush_cache(struct myrs_hba *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) if (!cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) scsi_dma_unmap(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) status = cmd_blk->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) if (cmd_blk->sense) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (sense_len > cmd_blk->sense_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) sense_len = cmd_blk->sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) dma_pool_free(cs->sense_pool, cmd_blk->sense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) cmd_blk->sense_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) cmd_blk->sense = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) cmd_blk->sense_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) if (cmd_blk->dcdb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) cmd_blk->dcdb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) cmd_blk->dcdb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) cmd_blk->dcdb_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) if (cmd_blk->sgl) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) dma_pool_free(cs->sg_pool, cmd_blk->sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) cmd_blk->sgl_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) cmd_blk->sgl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) cmd_blk->sgl_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) if (cmd_blk->residual)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) scsi_set_resid(scmd, cmd_blk->residual);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) scmd->result = (DID_BAD_TARGET << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) scmd->result = (DID_OK << 16) | status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) scmd->scsi_done(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (!cmd_blk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) if (cmd_blk->complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) complete(cmd_blk->complete);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) cmd_blk->complete = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) static void myrs_monitor(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) struct myrs_hba *cs = container_of(work, struct myrs_hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) monitor_work.work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) struct Scsi_Host *shost = cs->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) struct myrs_ctlr_info *info = cs->ctlr_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) unsigned int epoch = cs->fwstat_buf->epoch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) dev_dbg(&shost->shost_gendev, "monitor tick\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) status = myrs_get_fwstatus(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) if (cs->needs_update) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) cs->needs_update = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) mutex_lock(&cs->cinfo_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) status = myrs_get_ctlr_info(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) mutex_unlock(&cs->cinfo_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) status = myrs_get_event(cs, cs->next_evseq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) cs->event_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) if (status == MYRS_STATUS_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) myrs_log_event(cs, cs->event_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) cs->next_evseq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) interval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) if (time_after(jiffies, cs->secondary_monitor_time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) + MYRS_SECONDARY_MONITOR_INTERVAL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) cs->secondary_monitor_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) if (info->bg_init_active +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) info->ldev_init_active +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) info->pdev_init_active +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) info->cc_active +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) info->rbld_active +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) info->exp_active != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) shost_for_each_device(sdev, shost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) struct myrs_ldev_info *ldev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) int ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) if (sdev->channel < info->physchan_present)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) ldev_info = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) if (!ldev_info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) ldev_num = ldev_info->ldev_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) myrs_get_ldev_info(cs, ldev_num, ldev_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) cs->needs_update = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) if (epoch == cs->epoch &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) cs->fwstat_buf->next_evseq == cs->next_evseq &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) (cs->needs_update == false ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) time_before(jiffies, cs->primary_monitor_time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) + MYRS_PRIMARY_MONITOR_INTERVAL))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) interval = MYRS_SECONDARY_MONITOR_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) if (interval > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) cs->primary_monitor_time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) struct Scsi_Host *shost = cs->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) size_t elem_size, elem_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) elem_align = sizeof(struct myrs_sge);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) elem_size = shost->sg_tablesize * elem_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) elem_size, elem_align, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) if (cs->sg_pool == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) shost_printk(KERN_ERR, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) "Failed to allocate SG pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) MYRS_SENSE_SIZE, sizeof(int), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) if (cs->sense_pool == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) dma_pool_destroy(cs->sg_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) cs->sg_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) shost_printk(KERN_ERR, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) "Failed to allocate sense data pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) MYRS_DCDB_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) sizeof(unsigned char), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (!cs->dcdb_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) dma_pool_destroy(cs->sg_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) cs->sg_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) dma_pool_destroy(cs->sense_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) cs->sense_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) shost_printk(KERN_ERR, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) "Failed to allocate DCDB pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) snprintf(cs->work_q_name, sizeof(cs->work_q_name),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) "myrs_wq_%d", shost->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) cs->work_q = create_singlethread_workqueue(cs->work_q_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (!cs->work_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) dma_pool_destroy(cs->dcdb_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) cs->dcdb_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) dma_pool_destroy(cs->sg_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) cs->sg_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) dma_pool_destroy(cs->sense_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) cs->sense_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) shost_printk(KERN_ERR, shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) "Failed to create workqueue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) /* Initialize the Monitoring Timer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) static void myrs_destroy_mempools(struct myrs_hba *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) cancel_delayed_work_sync(&cs->monitor_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) destroy_workqueue(cs->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) dma_pool_destroy(cs->sg_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) dma_pool_destroy(cs->dcdb_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) dma_pool_destroy(cs->sense_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) static void myrs_unmap(struct myrs_hba *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) kfree(cs->event_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) kfree(cs->ctlr_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) if (cs->fwstat_buf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) cs->fwstat_buf, cs->fwstat_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) cs->fwstat_buf = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) if (cs->first_stat_mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) cs->first_stat_mbox, cs->stat_mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) cs->first_stat_mbox = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (cs->first_cmd_mbox) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) cs->first_cmd_mbox, cs->cmd_mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) cs->first_cmd_mbox = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) static void myrs_cleanup(struct myrs_hba *cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) struct pci_dev *pdev = cs->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) /* Free the memory mailbox, status, and related structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) myrs_unmap(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) if (cs->mmio_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (cs->disable_intr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) cs->disable_intr(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) iounmap(cs->mmio_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) cs->mmio_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) if (cs->irq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) free_irq(cs->irq, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) if (cs->io_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) release_region(cs->io_addr, 0x80);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) pci_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) scsi_host_put(cs->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) const struct pci_device_id *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) struct myrs_privdata *privdata =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) (struct myrs_privdata *)entry->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) irq_handler_t irq_handler = privdata->irq_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) unsigned int mmio_size = privdata->mmio_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) struct myrs_hba *cs = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) cs = myrs_alloc_host(pdev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) if (!cs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) dev_err(&pdev->dev, "Unable to allocate Controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) cs->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) if (pci_enable_device(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) goto Failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) cs->pci_addr = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) pci_set_drvdata(pdev, cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) spin_lock_init(&cs->queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) /* Map the Controller Register Window. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) if (mmio_size < PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) mmio_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) if (cs->mmio_base == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) "Unable to map Controller Register Window\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) goto Failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) if (privdata->hw_init(pdev, cs, cs->io_base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) goto Failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) /* Acquire shared access to the IRQ Channel. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) "Unable to acquire IRQ Channel %d\n", pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) goto Failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) cs->irq = pdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) return cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) Failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) "Failed to initialize Controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) myrs_cleanup(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) * myrs_err_status reports Controller BIOS Messages passed through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) * the Error Status Register when the driver performs the BIOS handshaking.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) * It returns true for fatal errors and false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) unsigned char parm0, unsigned char parm1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) struct pci_dev *pdev = cs->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) switch (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) case 0x00:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) dev_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) "Physical Device %d:%d Not Responding\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) parm1, parm0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) case 0x08:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) dev_notice(&pdev->dev, "Spinning Up Drives\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) case 0x30:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) dev_notice(&pdev->dev, "Configuration Checksum Error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) case 0x60:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) case 0x70:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) case 0x90:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) parm1, parm0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) case 0xA0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) case 0xB0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) case 0xD0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) dev_notice(&pdev->dev, "New Controller Configuration Found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) case 0xF0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * Hardware-specific functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) * DAC960 GEM Series Controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) static inline void DAC960_GEM_gen_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) __le32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) __le32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) static inline void DAC960_GEM_ack_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) __le32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) __le32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) static inline void DAC960_GEM_enable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) static inline void DAC960_GEM_disable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) __le32 val = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) __le32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) return !((le32_to_cpu(val) >> 24) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) DAC960_GEM_IRQMASK_MMBOX_IRQ));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) union myrs_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) memcpy(&mem_mbox->words[1], &mbox->words[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) /* Barrier to avoid reordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) mem_mbox->words[0] = mbox->words[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) /* Barrier to force PCI access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) dma_addr_t cmd_mbox_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) return readw(base + DAC960_GEM_CMDSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) unsigned char *param0, unsigned char *param1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) __le32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) static inline unsigned char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) while (DAC960_GEM_hw_mbox_is_full(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) DAC960_GEM_write_hw_mbox(base, mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) DAC960_GEM_hw_mbox_new_cmd(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) while (!DAC960_GEM_hw_mbox_status_available(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) status = DAC960_GEM_read_cmd_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) DAC960_GEM_ack_hw_mbox_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) DAC960_GEM_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) static int DAC960_GEM_hw_init(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) struct myrs_hba *cs, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) unsigned char status, parm0, parm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) DAC960_GEM_disable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) DAC960_GEM_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) while (DAC960_GEM_init_in_progress(base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) timeout < MYRS_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) if (DAC960_GEM_read_error_status(base, &status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) &parm0, &parm1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) myrs_err_status(cs, status, parm0, parm1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if (timeout == MYRS_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) "Timeout waiting for Controller Initialisation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) "Unable to Enable Memory Mailbox Interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) DAC960_GEM_reset_ctrl(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) DAC960_GEM_enable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) cs->disable_intr = DAC960_GEM_disable_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) cs->reset = DAC960_GEM_reset_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) struct myrs_hba *cs = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) void __iomem *base = cs->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) struct myrs_stat_mbox *next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) spin_lock_irqsave(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) DAC960_GEM_ack_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) next_stat_mbox = cs->next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) while (next_stat_mbox->id > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) unsigned short id = next_stat_mbox->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) struct scsi_cmnd *scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) struct myrs_cmdblk *cmd_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) if (id == MYRS_DCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) else if (id == MYRS_MCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) cmd_blk = &cs->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) scmd = scsi_host_find_tag(cs->host, id - 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) if (scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) if (cmd_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) cmd_blk->status = next_stat_mbox->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) cmd_blk->sense_len = next_stat_mbox->sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) cmd_blk->residual = next_stat_mbox->residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) dev_err(&cs->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) "Unhandled command completion %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) if (++next_stat_mbox > cs->last_stat_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) next_stat_mbox = cs->first_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) if (cmd_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) if (id < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) myrs_handle_cmdblk(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) myrs_handle_scsi(cs, cmd_blk, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) cs->next_stat_mbox = next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) spin_unlock_irqrestore(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) struct myrs_privdata DAC960_GEM_privdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) .hw_init = DAC960_GEM_hw_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) .irq_handler = DAC960_GEM_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) .mmio_size = DAC960_GEM_mmio_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) * DAC960 BA Series Controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) static inline void DAC960_BA_gen_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) static inline void DAC960_BA_reset_ctrl(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) val = readb(base + DAC960_BA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) static inline bool DAC960_BA_init_in_progress(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) val = readb(base + DAC960_BA_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) return !(val & DAC960_BA_IDB_INIT_DONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) static inline void DAC960_BA_ack_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) base + DAC960_BA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) val = readb(base + DAC960_BA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) val = readb(base + DAC960_BA_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) static inline void DAC960_BA_enable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) static inline void DAC960_BA_disable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) static inline bool DAC960_BA_intr_enabled(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) val = readb(base + DAC960_BA_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) union myrs_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) memcpy(&mem_mbox->words[1], &mbox->words[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) /* Barrier to avoid reordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) mem_mbox->words[0] = mbox->words[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) /* Barrier to force PCI access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) dma_addr_t cmd_mbox_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) return readw(base + DAC960_BA_CMDSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) unsigned char *param0, unsigned char *param1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) val = readb(base + DAC960_BA_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) if (!(val & DAC960_BA_ERRSTS_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) val &= ~DAC960_BA_ERRSTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) *error = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) static inline unsigned char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) while (DAC960_BA_hw_mbox_is_full(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) DAC960_BA_write_hw_mbox(base, mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) DAC960_BA_hw_mbox_new_cmd(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) while (!DAC960_BA_hw_mbox_status_available(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) status = DAC960_BA_read_cmd_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) DAC960_BA_ack_hw_mbox_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) DAC960_BA_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) static int DAC960_BA_hw_init(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) struct myrs_hba *cs, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) unsigned char status, parm0, parm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) DAC960_BA_disable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) DAC960_BA_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) while (DAC960_BA_init_in_progress(base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) timeout < MYRS_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) if (DAC960_BA_read_error_status(base, &status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) &parm0, &parm1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) myrs_err_status(cs, status, parm0, parm1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) if (timeout == MYRS_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) "Timeout waiting for Controller Initialisation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) "Unable to Enable Memory Mailbox Interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) DAC960_BA_reset_ctrl(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) DAC960_BA_enable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) cs->disable_intr = DAC960_BA_disable_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) cs->reset = DAC960_BA_reset_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) struct myrs_hba *cs = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) void __iomem *base = cs->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) struct myrs_stat_mbox *next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) spin_lock_irqsave(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) DAC960_BA_ack_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) next_stat_mbox = cs->next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) while (next_stat_mbox->id > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) unsigned short id = next_stat_mbox->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) struct scsi_cmnd *scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) struct myrs_cmdblk *cmd_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) if (id == MYRS_DCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) else if (id == MYRS_MCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) cmd_blk = &cs->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) scmd = scsi_host_find_tag(cs->host, id - 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) if (scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) if (cmd_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) cmd_blk->status = next_stat_mbox->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) cmd_blk->sense_len = next_stat_mbox->sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) cmd_blk->residual = next_stat_mbox->residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) dev_err(&cs->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) "Unhandled command completion %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) if (++next_stat_mbox > cs->last_stat_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) next_stat_mbox = cs->first_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) if (cmd_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) if (id < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) myrs_handle_cmdblk(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) myrs_handle_scsi(cs, cmd_blk, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) cs->next_stat_mbox = next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) spin_unlock_irqrestore(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) struct myrs_privdata DAC960_BA_privdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) .hw_init = DAC960_BA_hw_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) .irq_handler = DAC960_BA_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) .mmio_size = DAC960_BA_mmio_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) * DAC960 LP Series Controllers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) static inline void DAC960_LP_gen_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) static inline void DAC960_LP_reset_ctrl(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) val = readb(base + DAC960_LP_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) return val & DAC960_LP_IDB_HWMBOX_FULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) static inline bool DAC960_LP_init_in_progress(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) val = readb(base + DAC960_LP_IDB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) static inline void DAC960_LP_ack_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) base + DAC960_LP_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) val = readb(base + DAC960_LP_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) val = readb(base + DAC960_LP_ODB_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) static inline void DAC960_LP_enable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) static inline void DAC960_LP_disable_intr(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) static inline bool DAC960_LP_intr_enabled(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) val = readb(base + DAC960_LP_IRQMASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) union myrs_cmd_mbox *mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) memcpy(&mem_mbox->words[1], &mbox->words[1],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) /* Barrier to avoid reordering */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) mem_mbox->words[0] = mbox->words[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) /* Barrier to force PCI access */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) dma_addr_t cmd_mbox_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032) static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) return readw(base + DAC960_LP_CMDSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) static inline bool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) unsigned char *param0, unsigned char *param1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) val = readb(base + DAC960_LP_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) if (!(val & DAC960_LP_ERRSTS_PENDING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) val &= ~DAC960_LP_ERRSTS_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) *error = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) static inline unsigned char
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) unsigned char status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) while (DAC960_LP_hw_mbox_is_full(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) DAC960_LP_write_hw_mbox(base, mbox_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) DAC960_LP_hw_mbox_new_cmd(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) while (!DAC960_LP_hw_mbox_status_available(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) status = DAC960_LP_read_cmd_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) DAC960_LP_ack_hw_mbox_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) DAC960_LP_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) static int DAC960_LP_hw_init(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) struct myrs_hba *cs, void __iomem *base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) int timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) unsigned char status, parm0, parm1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) DAC960_LP_disable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) DAC960_LP_ack_hw_mbox_status(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) udelay(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) while (DAC960_LP_init_in_progress(base) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) timeout < MYRS_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) if (DAC960_LP_read_error_status(base, &status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) &parm0, &parm1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) myrs_err_status(cs, status, parm0, parm1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) timeout++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) if (timeout == MYRS_MAILBOX_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) "Timeout waiting for Controller Initialisation\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) "Unable to Enable Memory Mailbox Interface\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) DAC960_LP_reset_ctrl(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) DAC960_LP_enable_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) cs->disable_intr = DAC960_LP_disable_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) cs->reset = DAC960_LP_reset_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) struct myrs_hba *cs = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) void __iomem *base = cs->io_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) struct myrs_stat_mbox *next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) spin_lock_irqsave(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) DAC960_LP_ack_intr(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) next_stat_mbox = cs->next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) while (next_stat_mbox->id > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) unsigned short id = next_stat_mbox->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) struct scsi_cmnd *scmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) struct myrs_cmdblk *cmd_blk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) if (id == MYRS_DCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) cmd_blk = &cs->dcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) else if (id == MYRS_MCMD_TAG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) cmd_blk = &cs->mcmd_blk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) scmd = scsi_host_find_tag(cs->host, id - 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) if (scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) cmd_blk = scsi_cmd_priv(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) if (cmd_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) cmd_blk->status = next_stat_mbox->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) cmd_blk->sense_len = next_stat_mbox->sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) cmd_blk->residual = next_stat_mbox->residual;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) dev_err(&cs->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) "Unhandled command completion %d\n", id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) if (++next_stat_mbox > cs->last_stat_mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) next_stat_mbox = cs->first_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) if (cmd_blk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) if (id < 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153) myrs_handle_cmdblk(cs, cmd_blk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) myrs_handle_scsi(cs, cmd_blk, scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) cs->next_stat_mbox = next_stat_mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) spin_unlock_irqrestore(&cs->queue_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) struct myrs_privdata DAC960_LP_privdata = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) .hw_init = DAC960_LP_hw_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) .irq_handler = DAC960_LP_intr_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) .mmio_size = DAC960_LP_mmio_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) * Module functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) struct myrs_hba *cs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) cs = myrs_detect(dev, entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (!cs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) ret = myrs_get_config(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) myrs_cleanup(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) if (!myrs_create_mempools(dev, cs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) ret = scsi_add_host(cs->host, &dev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) myrs_destroy_mempools(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) goto failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) scsi_scan_host(cs->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) myrs_cleanup(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) static void myrs_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) struct myrs_hba *cs = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) if (cs == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) myrs_flush_cache(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) myrs_destroy_mempools(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) myrs_cleanup(cs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) static const struct pci_device_id myrs_id_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) PCI_DEVICE_ID_MYLEX_DAC960_GEM,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) .driver_data = (unsigned long) &DAC960_GEM_privdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) {0, },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) MODULE_DEVICE_TABLE(pci, myrs_id_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) static struct pci_driver myrs_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) .name = "myrs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) .id_table = myrs_id_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) .probe = myrs_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) .remove = myrs_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) static int __init myrs_init_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) myrs_raid_template = raid_class_attach(&myrs_raid_functions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) if (!myrs_raid_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) ret = pci_register_driver(&myrs_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) raid_class_release(myrs_raid_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) static void __exit myrs_cleanup_module(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) pci_unregister_driver(&myrs_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) raid_class_release(myrs_raid_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) module_init(myrs_init_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) module_exit(myrs_cleanup_module);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) MODULE_LICENSE("GPL");