// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
 *
 * Written By: Anil Ravindranath<anil_ravindranath@pmc-sierra.com>
 *             PMC-Sierra Inc
 *
 * Copyright (C) 2008, 2009 PMC Sierra Inc
 */
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/hdreg.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <linux/libata.h>
#include <linux/mutex.h>
#include <linux/ktime.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsicam.h>

#include "pmcraid.h"

/*
 * Module configuration parameters
 */
static unsigned int pmcraid_debug_log;
static unsigned int pmcraid_disable_aen;
static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
static unsigned int pmcraid_enable_msix;

/*
 * Data structures to support multiple adapters by the LLD.
 * pmcraid_adapter_count - count of configured adapters
 */
static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);

/*
 * Supporting user-level control interface through IOCTL commands.
 * pmcraid_major - major number to use
 * pmcraid_minor - minor number(s) to use
 */
static unsigned int pmcraid_major;
static struct class *pmcraid_class;
static DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);

/*
 * Module parameters
 */
MODULE_AUTHOR("Anil Ravindranath<anil_ravindranath@pmc-sierra.com>");
MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(PMCRAID_DRIVER_VERSION);

module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(log_level,
		"Enables firmware error code logging, default: 1: high-severity"
		" errors only, 2: all errors including high-severity errors,"
		" 0: disables logging");

module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(debug,
		"Enable driver verbose message logging. Set 1 to enable."
		" (default: 0)");

module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(disable_aen,
		"Disable driver AEN notifications to apps. Set 1 to disable."
		" (default: 0)");
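
/*
 * Usage note (editorial, illustrative): the three parameters above can be
 * set at module load time, e.g. "modprobe pmcraid log_level=2 debug=1
 * disable_aen=1", and because they are declared with S_IWUSR they can also
 * be changed by root at runtime via /sys/module/pmcraid/parameters/<name>.
 */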

/* chip specific constants for PMC MaxRAID controllers (same for
 * 0x5220 and 0x8010)
 */
static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
	{
	 .ioastatus = 0x0,
	 .ioarrin = 0x00040,
	 .mailbox = 0x7FC30,
	 .global_intr_mask = 0x00034,
	 .ioa_host_intr = 0x0009C,
	 .ioa_host_intr_clr = 0x000A0,
	 .ioa_host_msix_intr = 0x7FC40,
	 .ioa_host_mask = 0x7FC28,
	 .ioa_host_mask_clr = 0x7FC28,
	 .host_ioa_intr = 0x00020,
	 .host_ioa_intr_clr = 0x00020,
	 .transop_timeout = 300
	}
};

/*
 * PCI device ids supported by pmcraid driver
 */
static struct pci_device_id pmcraid_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
	  0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
	},
	{}
};

MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
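
/*
 * Editorial sketch (illustrative, not a quote of the probe code): the chip
 * constants above travel to the probe routine via the driver_data member of
 * the matching pci_device_id entry, roughly along the lines of:
 *
 *	struct pmcraid_chip_details *chip_cfg =
 *		(struct pmcraid_chip_details *)dev_id->driver_data;
 *
 * where dev_id is the const struct pci_device_id * handed to the PCI
 * probe() callback.
 */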

/**
 * pmcraid_slave_alloc - Prepare for commands to a device
 * @scsi_dev: scsi device struct
 *
 * This function is called by the mid-layer prior to sending any command to
 * the new device. It stores the resource entry details of the device in the
 * scsi_device struct. Queuecommand uses the resource handle and other details
 * to fill up the IOARCB while sending commands to the device.
 *
 * Return value:
 *	  0 on success / -ENXIO if device does not exist
 */
static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
{
	struct pmcraid_resource_entry *temp, *res = NULL;
	struct pmcraid_instance *pinstance;
	u8 target, bus, lun;
	unsigned long lock_flags;
	int rc = -ENXIO;
	u16 fw_version;

	pinstance = shost_priv(scsi_dev->host);

	fw_version = be16_to_cpu(pinstance->inq_data->fw_version);

	/* Driver exposes VSET and GSCSI resources only; all other device types
	 * are not exposed. The resource list is synchronized using the
	 * resource lock, so any traversal or modification of the list must be
	 * done while holding this lock.
	 */
	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
	list_for_each_entry(temp, &pinstance->used_res_q, queue) {

		/* do not expose VSETs with order-ids > MAX_VSET_TARGETS */
		if (RES_IS_VSET(temp->cfg_entry)) {
			if (fw_version <= PMCRAID_FW_VERSION_1)
				target = temp->cfg_entry.unique_flags1;
			else
				target = le16_to_cpu(temp->cfg_entry.array_id) & 0xFF;

			if (target > PMCRAID_MAX_VSET_TARGETS)
				continue;
			bus = PMCRAID_VSET_BUS_ID;
			lun = 0;
		} else if (RES_IS_GSCSI(temp->cfg_entry)) {
			target = RES_TARGET(temp->cfg_entry.resource_address);
			bus = PMCRAID_PHYS_BUS_ID;
			lun = RES_LUN(temp->cfg_entry.resource_address);
		} else {
			continue;
		}

		if (bus == scsi_dev->channel &&
		    target == scsi_dev->id &&
		    lun == scsi_dev->lun) {
			res = temp;
			break;
		}
	}

	if (res) {
		res->scsi_dev = scsi_dev;
		scsi_dev->hostdata = res;
		res->change_detected = 0;
		atomic_set(&res->read_failures, 0);
		atomic_set(&res->write_failures, 0);
		rc = 0;
	}
	spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
	return rc;
}
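
/*
 * Editorial sketch (assumed I/O-path usage, not a verbatim excerpt): the
 * resource entry cached in scsi_dev->hostdata by pmcraid_slave_alloc() is
 * what later lets the queuecommand path address the device, roughly:
 *
 *	struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
 *
 *	ioarcb->resource_handle = res->cfg_entry.resource_handle;
 */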

/**
 * pmcraid_slave_configure - Configures a SCSI device
 * @scsi_dev: scsi device struct
 *
 * This function is executed by the SCSI mid layer just after a device is
 * first scanned (i.e. it has responded to an INQUIRY). For VSET resources,
 * the timeout value (default 30s) is overridden with a higher value (60s)
 * and max_sectors is overridden with 512. It also sets the queue depth to
 * the host->cmd_per_lun value.
 *
 * Return value:
 *	  0 on success
 */
static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
{
	struct pmcraid_resource_entry *res = scsi_dev->hostdata;

	if (!res)
		return 0;

	/* LLD exposes VSETs and Enclosure devices only */
	if (RES_IS_GSCSI(res->cfg_entry) &&
	    scsi_dev->type != TYPE_ENCLOSURE)
		return -ENXIO;

	pmcraid_info("configuring %x:%x:%x:%x\n",
		     scsi_dev->host->unique_id,
		     scsi_dev->channel,
		     scsi_dev->id,
		     (u8)scsi_dev->lun);

	if (RES_IS_GSCSI(res->cfg_entry)) {
		scsi_dev->allow_restart = 1;
	} else if (RES_IS_VSET(res->cfg_entry)) {
		scsi_dev->allow_restart = 1;
		blk_queue_rq_timeout(scsi_dev->request_queue,
				     PMCRAID_VSET_IO_TIMEOUT);
		blk_queue_max_hw_sectors(scsi_dev->request_queue,
					 PMCRAID_VSET_MAX_SECTORS);
	}

	/*
	 * We never want to report TCQ support for these types of devices.
	 */
	if (!RES_IS_GSCSI(res->cfg_entry) && !RES_IS_VSET(res->cfg_entry))
		scsi_dev->tagged_supported = 0;

	return 0;
}

/**
 * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
 *
 * @scsi_dev: scsi device struct
 *
 * This is called by the mid-layer before removing a device. Pointer
 * assignments done in pmcraid_slave_alloc are reset to NULL here.
 *
 * Return value
 *   none
 */
static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
{
	struct pmcraid_resource_entry *res;

	res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;

	if (res)
		res->scsi_dev = NULL;

	scsi_dev->hostdata = NULL;
}

/**
 * pmcraid_change_queue_depth - Change the device's queue depth
 * @scsi_dev: scsi device struct
 * @depth: depth to set
 *
 * Return value
 *	actual depth set
 */
static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
{
	if (depth > PMCRAID_MAX_CMD_PER_LUN)
		depth = PMCRAID_MAX_CMD_PER_LUN;
	return scsi_change_queue_depth(scsi_dev, depth);
}
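
/*
 * Editorial note (illustrative): this callback is reached through the
 * generic SCSI sysfs attribute, e.g.
 * "echo 16 > /sys/bus/scsi/devices/<h>:<c>:<t>:<l>/queue_depth"; requests
 * above PMCRAID_MAX_CMD_PER_LUN are silently capped to that limit.
 */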

/**
 * pmcraid_init_cmdblk - initializes a command block
 *
 * @cmd: pointer to struct pmcraid_cmd to be initialized
 * @index: if >= 0, first time initialization; otherwise reinitialization
 *
 * Return Value
 *	 None
 */
static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
{
	struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
	dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;

	if (index >= 0) {
		/* first time initialization (called from probe) */
		u32 ioasa_offset =
			offsetof(struct pmcraid_control_block, ioasa);

		cmd->index = index;
		ioarcb->response_handle = cpu_to_le32(index << 2);
		ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
		ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
		ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
	} else {
		/* re-initialization of various lengths, called once command is
		 * processed by IOA
		 */
		memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
		ioarcb->hrrq_id = 0;
		ioarcb->request_flags0 = 0;
		ioarcb->request_flags1 = 0;
		ioarcb->cmd_timeout = 0;
		ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL);
		ioarcb->ioadl_bus_addr = 0;
		ioarcb->ioadl_length = 0;
		ioarcb->data_transfer_length = 0;
		ioarcb->add_cmd_param_length = 0;
		ioarcb->add_cmd_param_offset = 0;
		cmd->ioa_cb->ioasa.ioasc = 0;
		cmd->ioa_cb->ioasa.residual_data_length = 0;
		cmd->time_left = 0;
	}

	cmd->cmd_done = NULL;
	cmd->scsi_cmd = NULL;
	cmd->release = 0;
	cmd->completion_req = 0;
	cmd->sense_buffer = NULL;
	cmd->sense_buffer_dma = 0;
	cmd->dma_handle = 0;
	timer_setup(&cmd->timer, NULL, 0);
}
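
/*
 * Worked example (editorial, assuming the usual HRRQ encoding): for index 5,
 * pmcraid_init_cmdblk() above sets response_handle to 5 << 2 == 0x14.
 * Shifting by two keeps the low-order bits of the value posted in the host
 * request/response queue free for status/toggle flags, so the response path
 * can recover the command index again with handle >> 2.
 */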

/**
 * pmcraid_reinit_cmdblk - reinitialize a command block
 *
 * @cmd: pointer to struct pmcraid_cmd to be reinitialized
 *
 * Return Value
 *	 None
 */
static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
{
	pmcraid_init_cmdblk(cmd, -1);
}

/**
 * pmcraid_get_free_cmd - get a free cmd block from command block pool
 * @pinstance: adapter instance structure
 *
 * Return Value:
 *	returns pointer to cmd block or NULL if no blocks are available
 */
static struct pmcraid_cmd *pmcraid_get_free_cmd(
	struct pmcraid_instance *pinstance
)
{
	struct pmcraid_cmd *cmd = NULL;
	unsigned long lock_flags;

	/* free cmd block list is protected by free_pool_lock */
	spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);

	if (!list_empty(&pinstance->free_cmd_pool)) {
		cmd = list_entry(pinstance->free_cmd_pool.next,
				 struct pmcraid_cmd, free_list);
		list_del(&cmd->free_list);
	}
	spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);

	/* Initialize the command block before handing it to the caller */
	if (cmd != NULL)
		pmcraid_reinit_cmdblk(cmd);
	return cmd;
}

/**
 * pmcraid_return_cmd - return a completed command block back into free pool
 * @cmd: pointer to the command block
 *
 * Return Value:
 *	nothing
 */
static void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
{
	struct pmcraid_instance *pinstance = cmd->drv_inst;
	unsigned long lock_flags;

	spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
	list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
	spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
}
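
/*
 * Typical pairing (editorial, illustrative sketch of an internal request;
 * error handling elided):
 *
 *	struct pmcraid_cmd *cmd = pmcraid_get_free_cmd(pinstance);
 *
 *	if (cmd) {
 *		... build the IOARCB, fire the command, wait for response ...
 *		pmcraid_return_cmd(cmd);
 *	}
 */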

/**
 * pmcraid_read_interrupts - reads IOA interrupts
 *
 * @pinstance: pointer to adapter instance structure
 *
 * Return value
 *	 interrupts read from IOA
 */
static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
{
	return (pinstance->interrupt_mode) ?
		ioread32(pinstance->int_regs.ioa_host_msix_interrupt_reg) :
		ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
}

/**
 * pmcraid_disable_interrupts - Masks and clears all specified interrupts
 *
 * @pinstance: pointer to per adapter instance structure
 * @intrs: interrupts to disable
 *
 * Return Value
 *	 None
 */
static void pmcraid_disable_interrupts(
	struct pmcraid_instance *pinstance,
	u32 intrs
)
{
	u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
	u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;

	iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
	iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
	ioread32(pinstance->int_regs.global_interrupt_mask_reg);

	if (!pinstance->interrupt_mode) {
		iowrite32(intrs,
			  pinstance->int_regs.ioa_host_interrupt_mask_reg);
		ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
	}
}

/**
 * pmcraid_enable_interrupts - Enables specified interrupts
 *
 * @pinstance: pointer to per adapter instance structure
 * @intrs: interrupts to enable
 *
 * Return Value
 *	 None
 */
static void pmcraid_enable_interrupts(
	struct pmcraid_instance *pinstance,
	u32 intrs
)
{
	u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
	u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);

	iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);

	if (!pinstance->interrupt_mode) {
		iowrite32(~intrs,
			  pinstance->int_regs.ioa_host_interrupt_mask_reg);
		ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
	}

	pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
		     ioread32(pinstance->int_regs.global_interrupt_mask_reg),
		     ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
}
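
/*
 * Editorial note on the two helpers above (semantics inferred from the code,
 * not from the hardware spec): writing the requested bits to the mask
 * register masks them, so the enable path writes the complement (~intrs) to
 * leave only the requested interrupts unmasked; each ioread32() right after
 * an iowrite32() is presumably there to flush the posted MMIO write. A caller
 * is expected to bracket disruptive operations symmetrically, e.g.
 *
 *	pmcraid_disable_interrupts(pinstance, ~0u);
 *	... alert the IOA, run BIST, etc. ...
 *	pmcraid_enable_interrupts(pinstance, ~0u);
 */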

/**
 * pmcraid_clr_trans_op - clear the transition-to-operational interrupt
 *
 * @pinstance: pointer to per adapter instance structure
 *
 * Return Value
 *	 None
 */
static void pmcraid_clr_trans_op(
	struct pmcraid_instance *pinstance
)
{
	unsigned long lock_flags;

	if (!pinstance->interrupt_mode) {
		iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
			  pinstance->int_regs.ioa_host_interrupt_mask_reg);
		ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
		iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
			  pinstance->int_regs.ioa_host_interrupt_clr_reg);
		ioread32(pinstance->int_regs.ioa_host_interrupt_clr_reg);
	}

	if (pinstance->reset_cmd != NULL) {
		del_timer(&pinstance->reset_cmd->timer);
		spin_lock_irqsave(
			pinstance->host->host_lock, lock_flags);
		pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
		spin_unlock_irqrestore(
			pinstance->host->host_lock, lock_flags);
	}
}

/**
 * pmcraid_reset_type - Determine the required reset type
 * @pinstance: pointer to adapter instance structure
 *
 * The IOA requires a hard reset if any of the following conditions is true:
 * 1. the HRRQ valid interrupt is not masked
 * 2. the IOA reset alert doorbell is set
 * 3. any error interrupts are pending
 */
static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
{
	u32 mask;
	u32 intrs;
	u32 alerts;

	mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
	intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
	alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);

	if ((mask & INTRS_HRRQ_VALID) == 0 ||
	    (alerts & DOORBELL_IOA_RESET_ALERT) ||
	    (intrs & PMCRAID_ERROR_INTERRUPTS)) {
		pmcraid_info("IOA requires hard reset\n");
		pinstance->ioa_hard_reset = 1;
	}

	/* If unit check is active, trigger the dump */
	if (intrs & INTRS_IOA_UNIT_CHECK)
		pinstance->ioa_unit_check = 1;
}

/**
 * pmcraid_bist_done - completion function for PCI BIST
 * @t: pointer to the timer embedded in the reset command block
 * Return Value
 *	none
 */

static void pmcraid_ioa_reset(struct pmcraid_cmd *);

static void pmcraid_bist_done(struct timer_list *t)
{
	struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
	struct pmcraid_instance *pinstance = cmd->drv_inst;
	unsigned long lock_flags;
	int rc;
	u16 pci_reg;

	rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);

	/* If PCI config space can't be accessed yet, wait another two secs */
	if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
	    cmd->time_left > 0) {
		pmcraid_info("BIST not complete, waiting another 2 secs\n");
		cmd->timer.expires = jiffies + cmd->time_left;
		cmd->time_left = 0;
		add_timer(&cmd->timer);
	} else {
		cmd->time_left = 0;
		pmcraid_info("BIST is complete, proceeding with reset\n");
		spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
		pmcraid_ioa_reset(cmd);
		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
	}
}

/**
 * pmcraid_start_bist - starts BIST
 * @cmd: pointer to reset cmd
 * Return Value
 *	none
 */
static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
{
	struct pmcraid_instance *pinstance = cmd->drv_inst;
	u32 doorbells, intrs;

	/* proceed with BIST and wait for 2 seconds */
	iowrite32(DOORBELL_IOA_START_BIST,
		  pinstance->int_regs.host_ioa_interrupt_reg);
	doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
	intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
	pmcraid_info("doorbells after start bist: %x intrs: %x\n",
		     doorbells, intrs);

	cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
	cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
	cmd->timer.function = pmcraid_bist_done;
	add_timer(&cmd->timer);
}

/**
 * pmcraid_reset_alert_done - completion routine for reset_alert
 * @t: pointer to the timer embedded in the command block used for the
 *     reset sequence
 * Return value
 *	None
 */
static void pmcraid_reset_alert_done(struct timer_list *t)
{
	struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
	struct pmcraid_instance *pinstance = cmd->drv_inst;
	u32 status = ioread32(pinstance->ioa_status);
	unsigned long lock_flags;

	/* if the critical operation in progress bit is set or the wait times
	 * out, invoke the reset engine to proceed with the hard reset. If
	 * there is some more time to wait, restart the timer.
	 */
	if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
	    cmd->time_left <= 0) {
		pmcraid_info("critical op is reset, proceeding with reset\n");
		spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
		pmcraid_ioa_reset(cmd);
		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
	} else {
		pmcraid_info("critical op is not yet reset, waiting again\n");
		/* restart timer if some more time is available to wait */
		cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
		cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
		cmd->timer.function = pmcraid_reset_alert_done;
		add_timer(&cmd->timer);
	}
}

/**
 * pmcraid_reset_alert - alerts IOA for a possible reset
 * @cmd: command block to be used for the reset sequence.
 *
 * Return Value
 *	None. If the PCI config space is accessible, the reset-alert doorbell
 *	is written to the IOA and a timer is started to poll for completion;
 *	otherwise the driver falls back to BIST.
 */
static void pmcraid_notify_ioastate(struct pmcraid_instance *, u32);
static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
{
	struct pmcraid_instance *pinstance = cmd->drv_inst;
	u32 doorbells;
	int rc;
	u16 pci_reg;

	/* If we are able to access the IOA PCI config space, alert the IOA
	 * that we are going to reset it soon. This enables the IOA to preserve
	 * persistent error data, if any. In case memory space is not
	 * accessible, proceed with BIST or slot_reset.
	 */
	rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
	if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {

		/* wait for IOA permission, i.e. until the CRITICAL OPERATION
		 * bit is reset. The IOA doesn't generate any interrupts when
		 * this bit is reset, so a timer is started to poll for it.
		 */
		cmd->time_left = PMCRAID_RESET_TIMEOUT;
		cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
		cmd->timer.function = pmcraid_reset_alert_done;
		add_timer(&cmd->timer);

		iowrite32(DOORBELL_IOA_RESET_ALERT,
			  pinstance->int_regs.host_ioa_interrupt_reg);
		doorbells =
			ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
		pmcraid_info("doorbells after reset alert: %x\n", doorbells);
	} else {
		pmcraid_info("PCI config is not accessible, starting BIST\n");
		pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
		pmcraid_start_bist(cmd);
	}
}

/**
 * pmcraid_timeout_handler - Timeout handler for internally generated ops
 *
 * @t: pointer to the timer embedded in the command that timed out
 *
 * This function blocks host requests and initiates an adapter reset.
 *
 * Return value:
 *  None
 */
static void pmcraid_timeout_handler(struct timer_list *t)
{
	struct pmcraid_cmd *cmd = from_timer(cmd, t, timer);
	struct pmcraid_instance *pinstance = cmd->drv_inst;
	unsigned long lock_flags;

	dev_info(&pinstance->pdev->dev,
		 "Adapter being reset due to cmd(CDB[0] = %x) timeout\n",
		 cmd->ioa_cb->ioarcb.cdb[0]);

	/* Command timeouts result in a hard reset sequence. The command that
	 * timed out may be the one used as part of the reset sequence. In that
	 * case, restart the reset sequence using the same command block even
	 * if a reset is in progress. Otherwise fail this command and get a
	 * free command block to restart the reset sequence.
	 */
	spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
	if (!pinstance->ioa_reset_in_progress) {
		pinstance->ioa_reset_attempts = 0;
		cmd = pmcraid_get_free_cmd(pinstance);

		/* If we are out of command blocks, just return here; some
		 * other command's timeout handler can do the reset job.
		 */
		if (cmd == NULL) {
			spin_unlock_irqrestore(pinstance->host->host_lock,
					       lock_flags);
			pmcraid_err("no free cmnd block for timeout handler\n");
			return;
		}

		pinstance->reset_cmd = cmd;
		pinstance->ioa_reset_in_progress = 1;
	} else {
		pmcraid_info("reset is already in progress\n");

		if (pinstance->reset_cmd != cmd) {
			/* This command should have been given to the IOA; it
			 * will be completed by fail_outstanding_cmds anyway.
			 */
			pmcraid_err("cmd is pending but reset in progress\n");
		}

		/* If this command was being used as part of the reset
		 * sequence, set the cmd_done pointer to pmcraid_ioa_reset.
		 * This causes fail_outstanding_commands not to return the
		 * command block back to the free pool.
		 */
		if (cmd == pinstance->reset_cmd)
			cmd->cmd_done = pmcraid_ioa_reset;
	}

	/* Notify apps of important IOA bringup/bringdown sequences */
	if (pinstance->scn.ioa_state != PMC_DEVICE_EVENT_RESET_START &&
	    pinstance->scn.ioa_state != PMC_DEVICE_EVENT_SHUTDOWN_START)
		pmcraid_notify_ioastate(pinstance,
					PMC_DEVICE_EVENT_RESET_START);

	pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
	scsi_block_requests(pinstance->host);
	pmcraid_reset_alert(cmd);
	spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) * pmcraid_internal_done - completion routine for internally generated cmds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) * @cmd: command that got response from IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * Return Value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) /* Some of the internal commands are sent with callers blocking for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) * response. Same will be indicated as part of cmd->completion_req
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) * field. Response path needs to wake up any waiters waiting for cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) * completion if this flag is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) if (cmd->completion_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) cmd->completion_req = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) complete(&cmd->wait_for_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) /* Most internal commands are completed by the caller itself, so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * command block need not be returned to the free pool until we are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * required to do so (e.g. once initialization is done).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) if (cmd->release) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) cmd->release = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * @cmd: command that got response from IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * This routine is called after driver re-reads configuration table due to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * lost CCN. It returns the command block back to free pool and schedules
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) * worker thread to add/delete devices into the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * Return Value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (cmd->release) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) cmd->release = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) pmcraid_info("scheduling worker for config table reinitialization\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) schedule_work(&cmd->drv_inst->worker_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) * pmcraid_erp_done - Process completion of SCSI error response from device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) * @cmd: pmcraid_command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) * This function copies the sense buffer into the scsi_cmd struct and completes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) * scsi_cmd by calling scsi_done function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) scsi_cmd->result |= (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) scmd_printk(KERN_INFO, scsi_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) "command CDB[0] = %x failed with IOASC: 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) cmd->ioa_cb->ioarcb.cdb[0], ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (cmd->sense_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) dma_unmap_single(&pinstance->pdev->dev, cmd->sense_buffer_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) cmd->sense_buffer = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) cmd->sense_buffer_dma = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) scsi_dma_unmap(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * _pmcraid_fire_command - sends an IOA command to adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * This function adds the given command block to the pending command list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * and returns without waiting for the IOA to respond
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * @cmd: command to be sent to the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) /* Add this command block to the pending cmd pool. We do this prior to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) * writing the IOARCB to ioarrin because the IOA might complete the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) * command by the time we are about to add it to the list. The response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) * handler (isr/tasklet) looks for the cmd block in the pending list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) atomic_inc(&pinstance->outstanding_cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) /* driver writes lower 32-bit value of IOARCB address only */
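	/* make sure the pending-list update and the IOARCB contents are
	 * visible before the IOARCB address is written to IOARRIN
	 */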
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) iowrite32(le64_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr), pinstance->ioarrin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) * pmcraid_send_cmd - fires a command to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) * This function also sets up the optional timeout handler and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) * command completion function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * @cmd: pointer to the command block to be fired to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * @cmd_done: command completion function, called once IOA responds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) * @timeout: timeout to wait for this command completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) * @timeout_func: timeout handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) static void pmcraid_send_cmd(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) struct pmcraid_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) void (*cmd_done) (struct pmcraid_cmd *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) unsigned long timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) void (*timeout_func) (struct timer_list *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) /* initialize done function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) cmd->cmd_done = cmd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) if (timeout_func) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) /* setup timeout handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) cmd->timer.expires = jiffies + timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) cmd->timer.function = timeout_func;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) add_timer(&cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) /* fire the command to IOA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) _pmcraid_fire_command(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) * pmcraid_ioa_shutdown_done - completion function for IOA shutdown command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) * @cmd: pointer to the command block used for sending IOA shutdown command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static void pmcraid_ioa_shutdown_done(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) pmcraid_ioa_reset(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) * @cmd: pointer to the command block used as part of reset sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) /* Commands sent during the reset sequence trigger the next command in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * the sequence; hence reinit the done function and the timeout handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) pmcraid_reinit_cmdblk(cmd);
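	/* build an IOA command addressed to the IOA resource handle; cdb[1]
	 * selects the shutdown type (PMCRAID_SHUTDOWN_NORMAL here)
	 */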
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) cmd->ioa_cb->ioarcb.resource_handle =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) /* fire shutdown command to hardware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) pmcraid_info("firing normal shutdown command (%d) to IOA\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) pmcraid_notify_ioastate(cmd->drv_inst, PMC_DEVICE_EVENT_SHUTDOWN_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) pmcraid_send_cmd(cmd, pmcraid_ioa_shutdown_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) PMCRAID_SHUTDOWN_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * pmcraid_get_fwversion_done - completion function for get_fwversion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * @cmd: pointer to command block used to send INQUIRY command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) static void pmcraid_querycfg(struct pmcraid_cmd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) static void pmcraid_get_fwversion_done(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* configuration table entry size depends on firmware version. If fw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) * version is not known, it is not possible to interpret IOA config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) if (ioasc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) pmcraid_err("IOA Inquiry failed with %x\n", ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) pmcraid_reset_alert(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) pmcraid_querycfg(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * pmcraid_get_fwversion - reads firmware version information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * @cmd: pointer to command block used to send INQUIRY command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) static void pmcraid_get_fwversion(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) struct pmcraid_ioadl_desc *ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) u16 data_size = sizeof(struct pmcraid_inquiry_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) pmcraid_reinit_cmdblk(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ioarcb->request_type = REQ_TYPE_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
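	/* standard SCSI INQUIRY: cdb[1] bit 0 (EVPD) requests a vital product
	 * data page, cdb[2] selects vendor-specific page 0xD0 and cdb[3..4]
	 * carry the allocation length (MSB first)
	 */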
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) ioarcb->cdb[0] = INQUIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) ioarcb->cdb[1] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) ioarcb->cdb[2] = 0xD0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) ioarcb->cdb[3] = (data_size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ioarcb->cdb[4] = data_size & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) /* The inquiry transfer is small enough to be described by a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) * IOADL descriptor embedded in the IOARCB itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) offsetof(struct pmcraid_ioarcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) add_data.u.ioadl[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) ioarcb->request_flags0 |= NO_LINK_DESCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) ioarcb->data_transfer_length = cpu_to_le32(data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) ioadl = &(ioarcb->add_data.u.ioadl[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) ioadl->flags = IOADL_FLAGS_LAST_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) ioadl->address = cpu_to_le64(pinstance->inq_data_baddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) ioadl->data_len = cpu_to_le32(data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) pmcraid_send_cmd(cmd, pmcraid_get_fwversion_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * pmcraid_identify_hrrq - registers host rrq buffers with IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * @cmd: pointer to command block to be used for identify hrrq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) int index = cmd->hrrq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) __be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) __be32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) void (*done_function)(struct pmcraid_cmd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) pmcraid_reinit_cmdblk(cmd);
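	/* Host RRQs are registered one at a time: each completion re-enters
	 * this function for the next queue; once the last queue has been
	 * identified, the firmware version is read next
	 */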
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) cmd->hrrq_index = index + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) if (cmd->hrrq_index < pinstance->num_hrrq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) done_function = pmcraid_identify_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) cmd->hrrq_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) done_function = pmcraid_get_fwversion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /* Initialize ioarcb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) ioarcb->request_type = REQ_TYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* initialize the hrrq number where IOA will respond to this command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) ioarcb->hrrq_id = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) ioarcb->cdb[1] = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /* IOA expects the 64-bit PCI address to be written in big-endian
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * format (i.e. cdb[2] = MSByte .. cdb[9] = LSByte).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb:index => %llx:%llx:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) hrrq_addr, ioarcb->ioarcb_bus_addr, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /* Subsequent commands require HRRQ identification to be successful.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * Note that this gets called even during reset from SCSI mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) * or tasklet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) pmcraid_send_cmd(cmd, done_function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) PMCRAID_INTERNAL_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * pmcraid_send_hcam_cmd - send an initialized command block (HCAM) to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * @cmd: initialized command block pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
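	/* re-arm HCAM processing: clear the ignore flag for the notification
	 * type being registered so the response path handles this HCAM
	 */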
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * pmcraid_init_hcam - initialize a command block for sending an HCAM to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) * @type: HCAM type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) * pointer to initialized pmcraid_cmd structure or NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static struct pmcraid_cmd *pmcraid_init_hcam
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) (
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) struct pmcraid_instance *pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) u8 type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) struct pmcraid_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) struct pmcraid_ioarcb *ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) struct pmcraid_ioadl_desc *ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) struct pmcraid_hostrcb *hcam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) void (*cmd_done) (struct pmcraid_cmd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) dma_addr_t dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) int rcb_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) cmd = pmcraid_get_free_cmd(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) pmcraid_err("no free command blocks for hcam\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) return cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
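	/* pick the host RCB, its DMA address, size and the done handler that
	 * match the HCAM type being registered
	 */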
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) rcb_size = sizeof(struct pmcraid_hcam_ccn_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) cmd_done = pmcraid_process_ccn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) hcam = &pinstance->ccn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) rcb_size = sizeof(struct pmcraid_hcam_ldn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) cmd_done = pmcraid_process_ldn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) hcam = &pinstance->ldn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) /* initialize command pointer used for HCAM registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) hcam->cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) offsetof(struct pmcraid_ioarcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) add_data.u.ioadl[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) ioadl = ioarcb->add_data.u.ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) /* Initialize ioarcb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) ioarcb->request_type = REQ_TYPE_HCAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) ioarcb->cdb[1] = type;
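	/* length of the response buffer is passed in cdb[7..8], MSB first */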
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) ioarcb->cdb[8] = (rcb_size) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) ioadl[0].data_len = cpu_to_le32(rcb_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) ioadl[0].address = cpu_to_le64(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) cmd->cmd_done = cmd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) return cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * pmcraid_send_hcam - Send an HCAM to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) * @pinstance: ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) * @type: HCAM type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) * This function will send a Host Controlled Async command to IOA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
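	/* pmcraid_init_hcam() returns NULL when no free command block is
	 * available; only send the HCAM if one was obtained
	 */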
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) if (cmd)
		pmcraid_send_hcam_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * pmcraid_prepare_cancel_cmd - prepares a command block to abort another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) * @cmd: pointer to cmd that is used as cancelling command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) * @cmd_to_cancel: pointer to the command that needs to be cancelled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) static void pmcraid_prepare_cancel_cmd(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) struct pmcraid_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct pmcraid_cmd *cmd_to_cancel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) __be64 ioarcb_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) /* The IOARCB address of the command to be cancelled is passed in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) * cdb[2]..cdb[9] in big-endian format. Note that the length bits in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) * the IOARCB address are not masked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) ioarcb_addr = cpu_to_be64(le64_to_cpu(cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) /* Get the resource handle to where the command to be aborted has been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) * sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) ioarcb->request_type = REQ_TYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) * @cmd: command to be used as cancelling command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) * @type: HCAM type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) * @cmd_done: op done function for the cancelling command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) static void pmcraid_cancel_hcam(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) struct pmcraid_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) u8 type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) void (*cmd_done) (struct pmcraid_cmd *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) struct pmcraid_hostrcb *hcam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) hcam = (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) &pinstance->ldn : &pinstance->ccn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* Prepare to cancel the previously registered HCAM command. If no HCAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * command is registered (hcam->cmd is NULL), there is nothing to cancel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) if (hcam->cmd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) /* writing to IOARRIN must be protected by host_lock, as the mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) * may schedule queuecommand while we are doing this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) pmcraid_send_cmd(cmd, cmd_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) PMCRAID_INTERNAL_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) * @cmd: command block to be used for cancelling the HCAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) pmcraid_reinit_cmdblk(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) pmcraid_cancel_hcam(cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) PMCRAID_HCAM_CODE_CONFIG_CHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) pmcraid_ioa_shutdown);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) * @cmd: command block to be used for cancelling the HCAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) pmcraid_cancel_hcam(cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) PMCRAID_HCAM_CODE_LOG_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) pmcraid_cancel_ccn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) * pmcraid_expose_resource - check if the resource can be exposed to OS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) * @fw_version: firmware version code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * @cfgte: pointer to configuration table entry of the resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) * non-zero if the resource can be added to the SCSI mid-layer, 0 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) static int pmcraid_expose_resource(u16 fw_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) struct pmcraid_config_table_entry *cfgte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) int retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (cfgte->resource_type == RES_TYPE_VSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) if (fw_version <= PMCRAID_FW_VERSION_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) retval = ((cfgte->unique_flags1 & 0x80) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) retval = ((cfgte->unique_flags0 & 0x80) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) (cfgte->unique_flags1 & 0x80) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) } else if (cfgte->resource_type == RES_TYPE_GSCSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) retval = (RES_BUS(cfgte->resource_address) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) PMCRAID_VIRTUAL_ENCL_BUS_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) /* attributes supported by pmcraid_event_family */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) PMCRAID_AEN_ATTR_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) PMCRAID_AEN_ATTR_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) __PMCRAID_AEN_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) #define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) /* commands supported by pmcraid_event_family */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) PMCRAID_AEN_CMD_UNSPEC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) PMCRAID_AEN_CMD_EVENT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) __PMCRAID_AEN_CMD_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) #define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static struct genl_multicast_group pmcraid_mcgrps[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) { .name = "events", /* not really used - see ID discussion below */ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) static struct genl_family pmcraid_event_family __ro_after_init = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) .name = "pmcraid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) .version = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) .maxattr = PMCRAID_AEN_ATTR_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) .mcgrps = pmcraid_mcgrps,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) .n_mcgrps = ARRAY_SIZE(pmcraid_mcgrps),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) * pmcraid_netlink_init - registers pmcraid_event_family
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) * 0 if the pmcraid_event_family is successfully registered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) * with netlink generic, non-zero otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) static int __init pmcraid_netlink_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) result = genl_register_family(&pmcraid_event_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) pmcraid_info("registered NETLINK GENERIC group: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) pmcraid_event_family.id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * pmcraid_netlink_release - unregisters pmcraid_event_family
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) static void pmcraid_netlink_release(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) genl_unregister_family(&pmcraid_event_family);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) * pmcraid_notify_aen - sends event msg to user space application
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) * @aen_msg: AEN message to be sent to user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) * @data_size: size of the event data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) * 0 if success, error value in case of any failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) static int pmcraid_notify_aen(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) struct pmcraid_instance *pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) struct pmcraid_aen_msg *aen_msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) u32 data_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) struct sk_buff *skb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) void *msg_header;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) u32 total_size, nla_genl_hdr_total_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) int result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)
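	/* identify the adapter to user space: SCSI host unique_id in the
	 * upper 16 bits, char device minor number in the lower 16 bits
	 */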
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) aen_msg->hostno = (pinstance->host->unique_id << 16 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) MINOR(pinstance->cdev.dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) aen_msg->length = data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) data_size += sizeof(*aen_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) total_size = nla_total_size(data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) /* Add GENL_HDR to total_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) nla_genl_hdr_total_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) (total_size + (GENL_HDRLEN +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) ((struct genl_family *)&pmcraid_event_family)->hdrsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) + NLMSG_HDRLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) skb = genlmsg_new(nla_genl_hdr_total_size, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) if (!skb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) total_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) /* add the genetlink message header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) msg_header = genlmsg_put(skb, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) &pmcraid_event_family, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) PMCRAID_AEN_CMD_EVENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!msg_header) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) pmcraid_err("failed to copy command details\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) pmcraid_err("failed to copy AEN attribute data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) nlmsg_free(skb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /* send genetlink multicast message to notify applications */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) genlmsg_end(skb, msg_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
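	/* multicast to group index 0, i.e. the "events" group declared in
	 * pmcraid_mcgrps
	 */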
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) result = genlmsg_multicast(&pmcraid_event_family, skb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 0, 0, GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) /* If there are no listeners, genlmsg_multicast may return non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) * value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) if (result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) pmcraid_info("error (%x) sending aen event message\n", result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) * pmcraid_notify_ccn - notifies about CCN event msg to user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) * 0 if success, error value in case of any failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) static int pmcraid_notify_ccn(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) return pmcraid_notify_aen(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) pinstance->ccn.msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) le32_to_cpu(pinstance->ccn.hcam->data_len) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) sizeof(struct pmcraid_hcam_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) * pmcraid_notify_ldn - notifies about LDN event msg to user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) * 0 if success, error value in case of any failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) static int pmcraid_notify_ldn(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) return pmcraid_notify_aen(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) pinstance->ldn.msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) le32_to_cpu(pinstance->ldn.hcam->data_len) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) sizeof(struct pmcraid_hcam_hdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) * pmcraid_notify_ioastate - sends IOA state event msg to user space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) * @evt: controller state event to be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) static void pmcraid_notify_ioastate(struct pmcraid_instance *pinstance, u32 evt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) pinstance->scn.ioa_state = evt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) pmcraid_notify_aen(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) &pinstance->scn.msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) * pmcraid_handle_config_change - Handle a config change from the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) struct pmcraid_config_table_entry *cfg_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) struct pmcraid_hcam_ccn *ccn_hcam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) struct pmcraid_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) struct pmcraid_cmd *cfgcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) struct pmcraid_resource_entry *res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) unsigned long host_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) u32 new_entry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) u32 hidden_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) u16 fw_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) cfg_entry = &ccn_hcam->cfg_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) le32_to_cpu(pinstance->ccn.hcam->ilid),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) pinstance->ccn.hcam->op_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) (le32_to_cpu(pinstance->ccn.hcam->timestamp1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) ((le32_to_cpu(pinstance->ccn.hcam->timestamp2) & 0xffffffffLL) << 32)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) pinstance->ccn.hcam->notification_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) pinstance->ccn.hcam->notification_lost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) pinstance->ccn.hcam->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) pinstance->host->unique_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) RES_BUS(cfg_entry->resource_address)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) RES_IS_VSET(*cfg_entry) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) (fw_version <= PMCRAID_FW_VERSION_1 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) cfg_entry->unique_flags1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) le16_to_cpu(cfg_entry->array_id) & 0xFF) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) RES_TARGET(cfg_entry->resource_address),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) RES_LUN(cfg_entry->resource_address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /* If this HCAM indicates a lost notification, read the config table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) if (pinstance->ccn.hcam->notification_lost) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) cfgcmd = pmcraid_get_free_cmd(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) if (cfgcmd) {
			pmcraid_info("lost CCN, reading config table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) pinstance->reinit_cfg_table = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) pmcraid_querycfg(cfgcmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) pmcraid_err("lost CCN, no free cmd for querycfg\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) goto out_notify_apps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) /* If this resource is not going to be added to mid-layer, just notify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) * applications and return. If this notification is about hiding a VSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) * resource, check if it was exposed already.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (pinstance->ccn.hcam->notification_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) NOTIFICATION_TYPE_ENTRY_CHANGED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) cfg_entry->resource_type == RES_TYPE_VSET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) hidden_entry = (cfg_entry->unique_flags1 & 0x80) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) } else if (!pmcraid_expose_resource(fw_version, cfg_entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) goto out_notify_apps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)
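	/* Check whether a resource with this resource address is already
	 * known to the driver; if not, this CCN describes a new entry.
	 */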
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) list_for_each_entry(res, &pinstance->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) rc = memcmp(&res->cfg_entry.resource_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) &cfg_entry->resource_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) sizeof(cfg_entry->resource_address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) new_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) if (new_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) if (hidden_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) spin_unlock_irqrestore(&pinstance->resource_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) goto out_notify_apps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
		/* If there are more resources than the driver can manage,
		 * do not notify the applications about the CCN. Just ignore
		 * this notification and re-register the same HCAM.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) if (list_empty(&pinstance->free_res_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) spin_unlock_irqrestore(&pinstance->resource_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) pmcraid_err("too many resources attached\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) spin_lock_irqsave(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) pmcraid_send_hcam(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) PMCRAID_HCAM_CODE_CONFIG_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) spin_unlock_irqrestore(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) res = list_entry(pinstance->free_res_q.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct pmcraid_resource_entry, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) list_del(&res->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) res->scsi_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) res->reset_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) list_add_tail(&res->queue, &pinstance->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
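	/* Refresh the cached config table entry for this resource, using the
	 * firmware-dependent config_table_entry_size.
	 */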
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) memcpy(&res->cfg_entry, cfg_entry, pinstance->config_table_entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) if (pinstance->ccn.hcam->notification_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) NOTIFICATION_TYPE_ENTRY_DELETED || hidden_entry) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) if (res->scsi_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) if (fw_version <= PMCRAID_FW_VERSION_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) res->cfg_entry.unique_flags1 &= 0x7F;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) res->cfg_entry.array_id &= cpu_to_le16(0xFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) res->change_detected = RES_CHANGE_DEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) res->cfg_entry.resource_handle =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) PMCRAID_INVALID_RES_HANDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) schedule_work(&pinstance->worker_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) /* This may be one of the non-exposed resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) list_move_tail(&res->queue, &pinstance->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) } else if (!res->scsi_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) res->change_detected = RES_CHANGE_ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) schedule_work(&pinstance->worker_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) out_notify_apps:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644)
	/* Notify configuration changes to registered applications. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) if (!pmcraid_disable_aen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) pmcraid_notify_ccn(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) if (cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) pmcraid_send_hcam_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) /**
 * pmcraid_get_error_info - return error entry for an ioasc
 * @ioasc: ioasc code
 * Return Value
 *	matching entry from pmcraid_ioasc_error_table, or NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) return &pmcraid_ioasc_error_table[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) /**
 * pmcraid_ioasc_logger - log IOASC information based on user settings
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) * @ioasc: ioasc code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * @cmd: pointer to command that resulted in 'ioasc'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) static void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) if (error_info == NULL ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) cmd->drv_inst->current_log_level < error_info->log_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) /* log the error string */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) pmcraid_err("cmd [%x] for resource %x failed with %x(%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) ioasc, error_info->error_string);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) /**
 * pmcraid_handle_error_log - Handle an error log notification from the IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) struct pmcraid_hcam_ldn *hcam_ldn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) pmcraid_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) ("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) pinstance->ldn.hcam->ilid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) pinstance->ldn.hcam->op_code,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) pinstance->ldn.hcam->notification_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) pinstance->ldn.hcam->notification_lost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) pinstance->ldn.hcam->flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) pinstance->ldn.hcam->overlay_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) /* log only the errors, no need to log informational log entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) if (pinstance->ldn.hcam->notification_type !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) NOTIFICATION_TYPE_ERROR_LOG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (pinstance->ldn.hcam->notification_lost ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) HOSTRCB_NOTIFICATIONS_LOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) dev_info(&pinstance->pdev->dev, "Error notifications lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724)
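	/* A unit attention due to a bus reset means devices on that bus may
	 * have been reset; report it to the SCSI midlayer.
	 */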
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) dev_info(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) "UnitAttention due to IOA Bus Reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) scsi_report_bus_reset(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) pinstance->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) RES_BUS(hcam_ldn->error_log.fd_ra));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * pmcraid_process_ccn - Op done function for a CCN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) * @cmd: pointer to command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * This function is the op done function for a configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * change notification
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752)
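	/* Return the command block back to the free pool */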
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) pinstance->ccn.cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) /* If driver initiated IOA reset happened while this hcam was pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * with IOA, or IOA bringdown sequence is in progress, no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * re-register the hcam
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) atomic_read(&pinstance->ccn.ignore) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) } else if (ioasc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) dev_info(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) "Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) pmcraid_handle_config_change(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * pmcraid_process_ldn - op done function for an LDN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * @cmd: pointer to command block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) static void pmcraid_initiate_reset(struct pmcraid_instance *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) struct pmcraid_hcam_ldn *ldn_hcam =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) /* return the command block back to freepool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) pinstance->ldn.cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) /* If driver initiated IOA reset happened while this hcam was pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * with IOA, no need to re-register the hcam as reset engine will do it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * once reset sequence is complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) atomic_read(&pinstance->ccn.ignore) == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) } else if (!ioasc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) pmcraid_handle_error_log(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) spin_lock_irqsave(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) pmcraid_initiate_reset(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) spin_unlock_irqrestore(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) pinstance->timestamp_error = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) pmcraid_set_timestamp(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) dev_info(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) "Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) /* send netlink message for HCAM notification if enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) if (!pmcraid_disable_aen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) pmcraid_notify_ldn(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) if (cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) pmcraid_send_hcam_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * pmcraid_register_hcams - register HCAMs for CCN and LDN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) *
 * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) * pmcraid_unregister_hcams - cancel HCAMs registered already
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * @cmd: pointer to command used as part of reset sequence
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) /* During IOA bringdown, HCAM gets fired and tasklet proceeds with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) * handling hcam response though it is not necessary. In order to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) * prevent this, set 'ignore', so that bring-down sequence doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) * re-send any more hcams
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) atomic_set(&pinstance->ccn.ignore, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) atomic_set(&pinstance->ldn.ignore, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
	/* If the adapter reset was forced as part of a runtime reset
	 * sequence, start the reset sequence. A reset will be triggered
	 * even in case of an IOA unit_check.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if ((pinstance->force_ioa_reset && !pinstance->ioa_bringdown) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) pinstance->ioa_unit_check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) pinstance->force_ioa_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) pinstance->ioa_unit_check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) pmcraid_reset_alert(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) /* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * one after the other. So CCN cancellation will be triggered by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) * pmcraid_cancel_ldn itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) pmcraid_cancel_ldn(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * 1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) static void pmcraid_reinit_buffers(struct pmcraid_instance *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) u32 intrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
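	/* Reinitialize HRRQ buffers and sample pending interrupts before
	 * re-enabling them.
	 */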
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) pmcraid_reinit_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) intrs = pmcraid_read_interrupts(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
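		/* When interrupt_mode is not set (no MSI-X), mask and clear
		 * the transition-to-operational interrupt bit.
		 */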
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) if (!pinstance->interrupt_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) pinstance->int_regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) ioa_host_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) pinstance->int_regs.ioa_host_interrupt_clr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * pmcraid_soft_reset - performs a soft reset and makes IOA become ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) * @cmd : pointer to reset command block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) u32 int_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) u32 doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924)
	/* There will be an interrupt when the Transition to Operational bit
	 * is set, so the tasklet would execute the next reset task. The
	 * timeout handler would re-initiate a reset if no such interrupt
	 * arrives.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) cmd->cmd_done = pmcraid_ioa_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) cmd->timer.expires = jiffies +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) cmd->timer.function = pmcraid_timeout_handler;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) if (!timer_pending(&cmd->timer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) add_timer(&cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) /* Enable destructive diagnostics on IOA if it is not yet in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) * operational state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) doorbell = DOORBELL_RUNTIME_RESET |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942)
	/* Since we do RESET_ALERT and Start BIST, we have to write the
	 * MSI-X doorbell again to indicate the interrupt mode.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) if (pinstance->interrupt_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) iowrite32(DOORBELL_INTR_MODE_MSIX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) pinstance->int_regs.host_ioa_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
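	/* Write the doorbell, read it back (likely to flush the posted
	 * write), and then sample the IOA-to-host interrupt register.
	 */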
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
	ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) pmcraid_info("Waiting for IOA to become operational %x:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) int_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) pmcraid_info("%s is not yet implemented\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) * pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) * This function fails all outstanding ops. If they are submitted to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) * already, it sends cancel all messages if IOA is still accepting IOARCBs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) * otherwise just completes the commands and returns the cmd blocks to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) * pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) struct pmcraid_cmd *cmd, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990)
	/* The pending command list is protected by pending_pool_lock. Its
	 * traversal must be done while holding this lock.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) free_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) list_del(&cmd->free_list);
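		/* Drop the pool lock while completing the command; this is
		 * safe since the command was already removed from the list.
		 */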
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) spin_unlock_irqrestore(&pinstance->pending_pool_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) cmd->ioa_cb->ioasa.ioasc =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) cmd->ioa_cb->ioasa.ilid =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) cpu_to_le32(PMCRAID_DRIVER_ILID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) /* In case the command timer is still running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) del_timer(&cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007)
		/* If this is an IO command, complete it by invoking its
		 * scsi_done function. If this is one of the internal commands
		 * other than pmcraid_ioa_reset and HCAM commands, invoke
		 * cmd_done to complete it.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) if (cmd->scsi_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) __le32 resp = cmd->ioa_cb->ioarcb.response_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) scsi_cmd->result |= DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) scsi_dma_unmap(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) le32_to_cpu(resp) >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) scsi_cmd->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) } else if (cmd->cmd_done == pmcraid_internal_done ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) cmd->cmd_done == pmcraid_erp_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) cmd->cmd_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) } else if (cmd->cmd_done != pmcraid_ioa_reset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) cmd->cmd_done != pmcraid_ioa_shutdown_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) atomic_dec(&pinstance->outstanding_cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) * pmcraid_ioa_reset - Implementation of IOA reset logic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * @cmd: pointer to the cmd block to be used for entire reset process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) * This function executes most of the steps required for IOA reset. This gets
 * called by user threads (modprobe/insmod/rmmod), timer, tasklet and the
 * midlayer's 'eh_' thread. Access to variables used for controlling the reset
 * sequence is synchronized using the host lock. Various functions called
 * during the reset process make use of a single command block, a pointer to
 * which is also stored in the adapter instance structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) u8 reset_complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) pinstance->ioa_reset_in_progress = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (pinstance->reset_cmd != cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) pmcraid_err("reset is called with different command block\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) pinstance->reset_cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) pmcraid_info("reset_engine: state = %d, command = %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) pinstance->ioa_state, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) switch (pinstance->ioa_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) case IOA_STATE_DEAD:
		/* If IOA is offline, whatever the reset reason may be, just
		 * return. Callers might be waiting on the reset wait_q, so
		 * wake them up.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) pmcraid_err("IOA is offline no reset is possible\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) reset_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) case IOA_STATE_IN_BRINGDOWN:
		/* We enter here once the IOA shutdown command has been
		 * processed by the IOA. Alert the IOA for a possible reset;
		 * if the reset alert fails, the IOA goes through a hard reset.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) pmcraid_disable_interrupts(pinstance, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) pmcraid_reset_alert(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) case IOA_STATE_UNKNOWN:
		/* We may be called during probe or resume. Some pre-processing
		 * is required prior to the reset.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) scsi_block_requests(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099)
		/* If asked to reset while the IOA was processing responses,
		 * or if there are any error responses, then the IOA may
		 * require a hard reset.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) if (pinstance->ioa_hard_reset == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) if (ioread32(pinstance->ioa_status) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) INTRS_TRANSITION_TO_OPERATIONAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) pmcraid_info("sticky bit set, bring-up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) pmcraid_reinit_cmdblk(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) pmcraid_identify_hrrq(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) pmcraid_soft_reset(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) /* Alert IOA of a possible reset and wait for critical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) * operation in progress bit to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) pmcraid_reset_alert(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) case IOA_STATE_IN_RESET_ALERT:
		/* If the critical-operation-in-progress bit is reset or the
		 * wait times out, the reset proceeds with starting BIST on
		 * the IOA. The IOA_STATE_IN_HARD_RESET case keeps a count of
		 * reset attempts; if it exceeds PMCRAID_RESET_ATTEMPTS, the
		 * reset engine marks the IOA dead and returns.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) pmcraid_start_bist(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) case IOA_STATE_IN_HARD_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) pinstance->ioa_reset_attempts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /* retry reset if we haven't reached maximum allowed limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) pinstance->ioa_reset_attempts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) pmcraid_err("IOA didn't respond marking it as dead\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) pinstance->ioa_state = IOA_STATE_DEAD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) if (pinstance->ioa_bringdown)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) pmcraid_notify_ioastate(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) PMC_DEVICE_EVENT_SHUTDOWN_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) pmcraid_notify_ioastate(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) PMC_DEVICE_EVENT_RESET_FAILED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) reset_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) /* Once either bist or pci reset is done, restore PCI config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * space. If this fails, proceed with hard reset again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) pci_restore_state(pinstance->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) /* fail all pending commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) pmcraid_fail_outstanding_cmds(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) /* check if unit check is active, if so extract dump */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) if (pinstance->ioa_unit_check) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) pmcraid_info("unit check is active\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) pinstance->ioa_unit_check = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) pmcraid_get_dump(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) pinstance->ioa_reset_attempts--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) pmcraid_reset_alert(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171)
		/* If the reset reason is to bring down the IOA, we might be
		 * done with the reset; restore pci_config_space and complete
		 * the reset.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (pinstance->ioa_bringdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) pmcraid_info("bringing down the adapter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) pinstance->ioa_bringdown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) pinstance->ioa_state = IOA_STATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) pmcraid_notify_ioastate(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) PMC_DEVICE_EVENT_SHUTDOWN_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) reset_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) } else {
			/* We are bringing up the IOA, so proceed with soft
			 * reset. Reinitialize hrrq_buffers and their indices,
			 * and also enable interrupts after pci_restore_state.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) if (pmcraid_reset_enable_ioa(pinstance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) pmcraid_info("bringing up the adapter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) pmcraid_reinit_cmdblk(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) pmcraid_identify_hrrq(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) pmcraid_soft_reset(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) case IOA_STATE_IN_SOFT_RESET:
		/* TRANSITION TO OPERATIONAL is on, so start the initialization
		 * sequence.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) pmcraid_info("In softreset proceeding with bring-up\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) /* Initialization commands start with HRRQ identification. From
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) * now on tasklet completes most of the commands as IOA is up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) * and intrs are enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) pmcraid_identify_hrrq(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) case IOA_STATE_IN_BRINGUP:
		/* We are done with bringing up the IOA; change ioa_state to
		 * operational and wake up any waiters.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) pinstance->ioa_state = IOA_STATE_OPERATIONAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) reset_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) case IOA_STATE_OPERATIONAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) /* When IOA is operational and a reset is requested, check for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) * the reset reason. If reset is to bring down IOA, unregister
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) * HCAMs and initiate shutdown; if adapter reset is forced then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) * restart reset sequence again
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) pinstance->force_ioa_reset == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) pmcraid_notify_ioastate(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) PMC_DEVICE_EVENT_RESET_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) reset_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) pmcraid_reinit_cmdblk(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) pmcraid_unregister_hcams(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) /* reset will be completed if ioa_state is either DEAD or UNKNOWN or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) * OPERATIONAL. Reset all control variables used during reset, wake up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) * any waiting threads and let the SCSI mid-layer send commands. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * that host_lock must be held before invoking scsi_report_bus_reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) if (reset_complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) pinstance->ioa_reset_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) pinstance->ioa_reset_attempts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) pinstance->reset_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) pinstance->ioa_bringdown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) /* If target state is to bring up the adapter, proceed with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) * hcam registration and resource exposure to mid-layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) pmcraid_register_hcams(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) wake_up_all(&pinstance->reset_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * pmcraid_initiate_reset - initiates reset sequence. This is called from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) * ISR/tasklet during error interrupts including IOA unit check. If reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) * is already in progress, it just returns, otherwise initiates IOA reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) * to bring IOA up to operational state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) struct pmcraid_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) /* If the reset is already in progress, just return, otherwise start
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) * reset sequence and return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) if (!pinstance->ioa_reset_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) scsi_block_requests(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) cmd = pmcraid_get_free_cmd(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (cmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) pmcraid_err("no cmnd blocks for initiate_reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) pinstance->reset_cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) pinstance->force_ioa_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) pmcraid_notify_ioastate(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) PMC_DEVICE_EVENT_RESET_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) pmcraid_ioa_reset(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) /**
 * pmcraid_reset_reload - utility routine for doing IOA reset either to bring up
 * or bring down the IOA
 * @pinstance: pointer to adapter instance structure
 * @shutdown_type: shutdown type to be used: NONE, NORMAL or ABBREV
 * @target_state: expected target state after reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) * Note: This command initiates reset and waits for its completion. Hence this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) * should not be called from isr/timer/tasklet functions (timeout handlers,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) * error response handlers and interrupt handlers).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) * 1 in case ioa_state is not target_state, 0 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) static int pmcraid_reset_reload(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) struct pmcraid_instance *pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) u8 shutdown_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) u8 target_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) struct pmcraid_cmd *reset_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) int reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (pinstance->ioa_reset_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) pmcraid_info("reset_reload: reset is already in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) wait_event(pinstance->reset_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) !pinstance->ioa_reset_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) if (pinstance->ioa_state == IOA_STATE_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) pmcraid_info("reset_reload: IOA is dead\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) if (pinstance->ioa_state == target_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) pmcraid_info("reset_reload: proceeding with reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) scsi_block_requests(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) reset_cmd = pmcraid_get_free_cmd(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) if (reset_cmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) pmcraid_err("no free cmnd for reset_reload\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359)
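	/* A NORMAL shutdown means the IOA is being brought down rather than
	 * brought back to the operational state.
	 */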
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) if (shutdown_type == SHUTDOWN_NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) pinstance->ioa_bringdown = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) pinstance->ioa_shutdown_type = shutdown_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) pinstance->reset_cmd = reset_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) pinstance->force_ioa_reset = reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) pmcraid_info("reset_reload: initiating reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) pmcraid_ioa_reset(reset_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) pmcraid_info("reset_reload: waiting for reset to complete\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) wait_event(pinstance->reset_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) !pinstance->ioa_reset_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) pmcraid_info("reset_reload: reset is complete !!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) scsi_unblock_requests(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) return pinstance->ioa_state != target_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) return reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) * whatever is returned from pmcraid_reset_reload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) return pmcraid_reset_reload(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) SHUTDOWN_NORMAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) IOA_STATE_UNKNOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) * whatever is returned from pmcraid_reset_reload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) pmcraid_notify_ioastate(pinstance, PMC_DEVICE_EVENT_RESET_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) return pmcraid_reset_reload(pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) SHUTDOWN_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) IOA_STATE_OPERATIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) * pmcraid_request_sense - Send request sense to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) * @cmd: pmcraid command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) * This function sends a request sense to a device as a result of a check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) * condition. This method re-uses the same command block that failed earlier.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) struct device *dev = &cmd->drv_inst->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)
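	/* map the mid-layer sense buffer so the IOA can DMA sense data
	 * directly into it
	 */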
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) cmd->sense_buffer = cmd->scsi_cmd->sense_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) cmd->sense_buffer_dma = dma_map_single(dev, cmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) if (dma_mapping_error(dev, cmd->sense_buffer_dma)) {
		pmcraid_err
			("couldn't map sense buffer for request sense\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) pmcraid_erp_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) /* re-use the command block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) ioarcb->request_flags0 = (SYNC_COMPLETE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) NO_LINK_DESCS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) INHIBIT_UL_CHECK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) ioarcb->request_type = REQ_TYPE_SCSI;
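	/* REQUEST SENSE (6-byte CDB): allocation length goes in byte 4 */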
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) ioarcb->cdb[0] = REQUEST_SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
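	/* describe the sense buffer with the single IOADL descriptor embedded
	 * in the IOARCB itself
	 */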
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) offsetof(struct pmcraid_ioarcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) add_data.u.ioadl[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) ioadl->flags = IOADL_FLAGS_LAST_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
	/* request sense might be called as part of error response processing
	 * which runs in tasklet context. It is possible that the mid-layer
	 * might schedule queuecommand during this time, hence writing to
	 * IOARRIN must be protected by host_lock.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) pmcraid_send_cmd(cmd, pmcraid_erp_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) PMCRAID_REQUEST_SENSE_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) * @cmd: command that failed
 * @need_sense: if set, sense data has already been obtained and ERP is
 *              completed once the cancel all finishes; otherwise a REQUEST
 *              SENSE is issued after the cancel all completes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) * This function sends a cancel all to a device to clear the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, bool need_sense)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) ioarcb->request_flags0 = SYNC_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) ioarcb->request_type = REQ_TYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) if (RES_IS_GSCSI(res->cfg_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488)
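	/* CANCEL ALL REQUESTS transfers no data, so no IOADL is set up */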
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) ioarcb->ioadl_bus_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) ioarcb->ioadl_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) ioarcb->data_transfer_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) ioarcb->ioarcb_bus_addr &= cpu_to_le64((~0x1FULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
	/* writing to IOARRIN must be protected by host_lock, as the mid-layer
	 * may schedule queuecommand while we are doing this
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) pmcraid_send_cmd(cmd, need_sense ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) pmcraid_erp_done : pmcraid_request_sense,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) PMCRAID_REQUEST_SENSE_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) /**
 * pmcraid_frame_auto_sense - frame fixed format sense information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) * @cmd: pointer to failing command block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) u32 ioasc = le32_to_cpu(ioasa->ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) u32 failing_lba = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) if (RES_IS_VSET(res->cfg_entry) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) ioasa->u.vset.failing_lba_hi != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525)
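		/* descriptor-format sense data (response code 0x72) carrying
		 * the 64-bit failing LBA in an information descriptor
		 */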
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) sense_buf[0] = 0x72;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) sense_buf[7] = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) sense_buf[8] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) sense_buf[9] = 0x0A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) sense_buf[10] = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) sense_buf[12] = (failing_lba & 0xff000000) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) sense_buf[15] = failing_lba & 0x000000ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) sense_buf[16] = (failing_lba & 0xff000000) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) sense_buf[19] = failing_lba & 0x000000ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) } else {
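		/* fixed-format sense data (response code 0x70) */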
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) sense_buf[0] = 0x70;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) if (RES_IS_VSET(res->cfg_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) failing_lba =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) le32_to_cpu(ioasa->u.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) vset.failing_lba_lo);
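			/* set the VALID bit; the INFORMATION field (bytes 3-6)
			 * holds the failing LBA
			 */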
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) sense_buf[0] |= 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) sense_buf[3] = (failing_lba >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) sense_buf[4] = (failing_lba >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) sense_buf[5] = (failing_lba >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) sense_buf[6] = failing_lba & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) sense_buf[7] = 6; /* additional length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /**
 * pmcraid_error_handler - Error response handler for a SCSI op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) * @cmd: pointer to pmcraid_cmd that has failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) * This function determines whether or not to initiate ERP on the affected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) * device. This is called from a tasklet, which doesn't hold any locks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) * Return value:
 * 0 if the caller can complete the request; otherwise 1, in which case the
 * error handler itself completes the request and returns the command block
 * back to the free pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) u32 ioasc = le32_to_cpu(ioasa->ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) bool sense_copied = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) pmcraid_info("resource pointer is NULL\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) /* If this was a SCSI read/write command keep count of errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) atomic_inc(&res->read_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) atomic_inc(&res->write_failures);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (!RES_IS_GSCSI(res->cfg_entry) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) pmcraid_frame_auto_sense(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) /* Log IOASC/IOASA information based on user settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) pmcraid_ioasc_logger(ioasc, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) switch (masked_ioasc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) scsi_cmd->result |= (DID_ABORT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) scsi_cmd->result |= (DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) case PMCRAID_IOASC_NR_SYNC_REQUIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) res->sync_reqd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) scsi_cmd->result |= (DID_IMM_RETRY << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) scsi_cmd->result |= (DID_PASSTHROUGH << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) case PMCRAID_IOASC_UA_BUS_WAS_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (!res->reset_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) scsi_report_bus_reset(pinstance->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) scsi_cmd->device->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) scsi_cmd->result |= (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) res->sync_reqd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643)
		/* if check condition is not active, return with error;
		 * otherwise get/frame the sense buffer
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) SAM_STAT_CHECK_CONDITION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) /* If we have auto sense data as part of IOASA pass it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) * mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) if (ioasa->auto_sense_length != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) short sense_len = le16_to_cpu(ioasa->auto_sense_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) int data_size = min_t(u16, sense_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) memcpy(scsi_cmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) ioasa->sense_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) sense_copied = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) if (RES_IS_GSCSI(res->cfg_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) pmcraid_cancel_all(cmd, sense_copied);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) else if (sense_copied)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) pmcraid_erp_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) pmcraid_request_sense(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) scsi_cmd->result |= (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) /**
 * pmcraid_reset_device - device reset handler
 *
 * @scsi_cmd: scsi command struct
 * @timeout: timeout to be used for the reset command
 * @modifier: reset modifier indicating the reset sequence to be performed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) * This function issues a device reset to the affected device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) * A LUN reset will be sent to the device first. If that does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) * not work, a target reset will be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) * SUCCESS / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) static int pmcraid_reset_device(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) struct scsi_cmnd *scsi_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) unsigned long timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) u8 modifier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) struct pmcraid_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) struct pmcraid_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) struct pmcraid_ioarcb *ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) pinstance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) sdev_printk(KERN_ERR, scsi_cmd->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) "reset_device: NULL resource pointer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) /* If adapter is currently going through reset/reload, return failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) * This will force the mid-layer to call _eh_bus/host reset, which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) * will then go to sleep and wait for the reset to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) if (pinstance->ioa_reset_in_progress ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) pinstance->ioa_state == IOA_STATE_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) res->reset_progress = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) pmcraid_info("Resetting %s resource with addr %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) ((modifier & RESET_DEVICE_LUN) ? "LUN" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) le32_to_cpu(res->cfg_entry.resource_address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) /* get a free cmd block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) cmd = pmcraid_get_free_cmd(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) if (cmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) pmcraid_err("%s: no cmd blocks are available\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) ioarcb->resource_handle = res->cfg_entry.resource_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) ioarcb->request_type = REQ_TYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) /* Initialize reset modifier bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) if (modifier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) modifier = ENABLE_RESET_MODIFIER | modifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) ioarcb->cdb[1] = modifier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
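	/* this is a blocking reset: pmcraid_internal_done will complete
	 * wait_for_completion once the IOA responds
	 */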
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) init_completion(&cmd->wait_for_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) cmd->completion_req = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) pmcraid_send_cmd(cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) pmcraid_internal_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773)
	/* RESET_DEVICE command completes after all pending IOARCBs are
	 * completed. Once this command is completed, pmcraid_internal_done
	 * will wake up the 'completion' queue.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) wait_for_completion(&cmd->wait_for_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) /* complete the command here itself and return the command block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) * to free list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) res->reset_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) /* set the return value based on the returned ioasc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) * _pmcraid_io_done - helper for pmcraid_io_done function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) * @cmd: pointer to pmcraid command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) * @reslen: residual data length to be set in the ioasa
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) * @ioasc: ioasc either returned by IOA or set by driver itself.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) * This function is invoked by pmcraid_io_done to complete mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) * scsi ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) * Return value:
 * 0 if the caller is required to return the command block to the free pool.
 * Returns 1 if the caller need not worry about freeing the command block, as
 * the error handler will take care of that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) scsi_set_resid(scsi_cmd, reslen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) ioasc, scsi_cmd->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) rc = pmcraid_error_handler(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821)
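	/* if the error handler did not take over the command, complete the
	 * SCSI command here
	 */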
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) scsi_dma_unmap(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) * pmcraid_io_done - SCSI completion function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * @cmd: pointer to pmcraid command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) *
 * This function is invoked by the tasklet/mid-layer error handler to complete
 * the SCSI ops sent from the mid-layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) static void pmcraid_io_done(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) * @cmd: command block of the command to be aborted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) * Return Value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) * returns pointer to command structure used as cancelling cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) struct pmcraid_cmd *cancel_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) pinstance = (struct pmcraid_instance *)cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) cancel_cmd = pmcraid_get_free_cmd(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) if (cancel_cmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) pmcraid_err("%s: no cmd blocks are available\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
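	/* build the cancelling IOARCB (ABORT TASK) referencing the command
	 * that is to be aborted
	 */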
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) init_completion(&cancel_cmd->wait_for_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) cancel_cmd->completion_req = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) pmcraid_info("command (%d) CDB[0] = %x for %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) cancel_cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) pmcraid_send_cmd(cancel_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) pmcraid_internal_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) PMCRAID_INTERNAL_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) return cancel_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) * pmcraid_abort_complete - Waits for ABORT TASK completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) *
 * @cancel_cmd: command block used as the cancelling command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) * Return Value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) * returns SUCCESS if ABORT TASK has good completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) * otherwise FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) struct pmcraid_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) wait_for_completion(&cancel_cmd->wait_for_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) res = cancel_cmd->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) cancel_cmd->res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912)
	/* If the abort task did not time out, we will get a good completion
	 * as the sense_key, otherwise we may get one of the following
	 * responses due to a subsequent bus reset or device reset. In case
	 * IOASC is NR_SYNC_REQUIRED, set the sync_reqd flag for the
	 * corresponding resource.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) res->sync_reqd = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) ioasc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) /* complete the command here itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) pmcraid_return_cmd(cancel_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) * pmcraid_eh_abort_handler - entry point for aborting a single task on errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) *
 * @scsi_cmd: scsi command struct given by mid-layer. When this is called,
 *            the mid-layer ensures that no other commands are queued. This
 *            never gets called under interrupt, but from a separate eh thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) * SUCCESS / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) struct pmcraid_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) struct pmcraid_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) unsigned long host_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) unsigned long pending_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) struct pmcraid_cmd *cancel_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) int cmd_found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) int rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) pinstance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) scmd_printk(KERN_INFO, scsi_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) "I/O command timed out, aborting it.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) if (res == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) /* If we are currently going through reset/reload, return failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) * This will force the mid-layer to eventually call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) * pmcraid_eh_host_reset which will then go to sleep and wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) * reset to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) if (pinstance->ioa_reset_in_progress ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) pinstance->ioa_state == IOA_STATE_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) spin_unlock_irqrestore(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975)
	/* loop over the pending cmd list to find the cmd corresponding to this
	 * scsi_cmd. Note that the command might already have been completed by
	 * the time we get here. Locking: all pending commands are protected by
	 * pending_pool_lock.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) if (cmd->scsi_cmd == scsi_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) cmd_found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) spin_unlock_irqrestore(&pinstance->pending_pool_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) pending_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) /* If the command to be aborted was given to IOA and still pending with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) * it, send ABORT_TASK to abort this and wait for its completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) if (cmd_found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) cancel_cmd = pmcraid_abort_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) spin_unlock_irqrestore(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) if (cancel_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) cancel_cmd->res = cmd->scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) rc = pmcraid_abort_complete(cancel_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) return cmd_found ? rc : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) *
 * All these routines invoke pmcraid_reset_device with appropriate parameters.
 * Since these are called from the mid-layer EH thread, no other IO will be
 * queued to the resource being reset. However, the control path (IOCTL) may be
 * active, so it is necessary to synchronize IOARRIN writes, which
 * pmcraid_reset_device takes care of by locking/unlocking the host_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) * SUCCESS or FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) scmd_printk(KERN_INFO, scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) "resetting device due to an I/O command timeout.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) return pmcraid_reset_device(scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) PMCRAID_INTERNAL_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) RESET_DEVICE_LUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) scmd_printk(KERN_INFO, scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) "Doing bus reset due to an I/O command timeout.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) return pmcraid_reset_device(scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) PMCRAID_RESET_BUS_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) RESET_DEVICE_BUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) scmd_printk(KERN_INFO, scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) "Doing target reset due to an I/O command timeout.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) return pmcraid_reset_device(scmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) PMCRAID_INTERNAL_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) RESET_DEVICE_TARGET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) * pmcraid_eh_host_reset_handler - adapter reset handler callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) * @scmd: pointer to scsi_cmd that was sent to a resource of adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) * Initiates adapter reset to bring it up to operational state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) * SUCCESS or FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) unsigned long interval = 10000; /* 10 seconds interval */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) struct pmcraid_instance *pinstance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) (struct pmcraid_instance *)(scmd->device->host->hostdata);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) /* Wait for an additional 150 seconds just in case the firmware comes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) * up and manages to complete all the pending commands except the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) * two HCAMs (CCN and LDN).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) while (waits--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) if (atomic_read(&pinstance->outstanding_cmds) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) PMCRAID_MAX_HCAM_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) msleep(interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) dev_err(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) "Adapter being reset due to an I/O command timeout.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) * @cmd: pmcraid command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) * @sgcount: count of scatter-gather elements
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) * returns a pointer to struct pmcraid_ioadl_desc, initialized to point to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) * internal or external IOADLs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) static struct pmcraid_ioadl_desc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) struct pmcraid_ioadl_desc *ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) int ioadl_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100)
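/* Additional command parameters, if any, share the IOARCB add_data area with
* the inline IOADLs; count how many 16-byte descriptor slots they occupy so
* that the IOADLs can be placed after them.
*/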
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) if (ioarcb->add_cmd_param_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) ioadl_count = DIV_ROUND_UP(le16_to_cpu(ioarcb->add_cmd_param_length), 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc) * sgcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) /* External IOADLs start at offset 0x80 from the control_block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) * structure, re-using 24 out of the 27 IOADLs that are part of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) * IOARCB. The driver must indicate to the firmware that these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) * IOADLs are to be treated as external to the IOARCB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) ioarcb->ioadl_bus_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) cpu_to_le64((cmd->ioa_cb_bus_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) offsetof(struct pmcraid_ioarcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) add_data.u.ioadl[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) ioadl = &ioarcb->add_data.u.ioadl[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) ioarcb->ioadl_bus_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) cpu_to_le64((cmd->ioa_cb_bus_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) offsetof(struct pmcraid_ioarcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) add_data.u.ioadl[ioadl_count]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) ioarcb->ioarcb_bus_addr |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) cpu_to_le64(DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) return ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) * @cmd: pmcraid command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) * This function is invoked by queuecommand entry point while sending a command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) * 0 on success or -1 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) static int pmcraid_build_ioadl(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) struct pmcraid_instance *pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) struct pmcraid_cmd *cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) int i, nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) struct scatterlist *sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) struct pmcraid_ioadl_desc *ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) u32 length = scsi_bufflen(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) if (!length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) nseg = scsi_dma_map(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) if (nseg < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) scmd_printk(KERN_ERR, scsi_cmd, "scsi_dma_map failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) } else if (nseg > PMCRAID_MAX_IOADLS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) scsi_dma_unmap(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) scmd_printk(KERN_ERR, scsi_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) "sg count is (%d) more than allowed!\n", nseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) /* Initialize IOARCB data transfer length fields */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) ioarcb->request_flags0 |= NO_LINK_DESCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) ioarcb->data_transfer_length = cpu_to_le32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) ioadl = pmcraid_init_ioadls(cmd, nseg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) /* Initialize IOADL descriptor addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) ioadl[i].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) /* setup last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186) ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192) * pmcraid_free_sglist - Frees an allocated SG buffer list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) * @sglist: scatter/gather list pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195) * Frees DMA'able memory previously allocated with pmcraid_alloc_sglist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) sgl_free_order(sglist->scatterlist, sglist->order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) kfree(sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) * pmcraid_alloc_sglist - Allocates memory for a SG list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) * @buflen: buffer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210) * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) * list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214) * pointer to sglist / NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) struct pmcraid_sglist *sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) int sg_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221)
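/* Size each scatterlist element so that the whole buffer fits in at most
* PMCRAID_MAX_IOADLS - 1 entries; each entry is a power-of-two number of
* pages given by 'order'.
*/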
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) order = (sg_size > 0) ? get_order(sg_size) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) /* Allocate a scatter/gather list for the DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226) sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) if (sglist == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) sglist->order = order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) sglist->scatterlist = sgl_alloc_order(buflen, order, false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) GFP_KERNEL | GFP_DMA | __GFP_ZERO, &sglist->num_sg);
if (!sglist->scatterlist) {
kfree(sglist);
return NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) return sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238) * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) * @sglist: scatter/gather list pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) * @buffer: buffer pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) * @len: buffer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242) * @direction: data transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) * Copy data between a user buffer and a buffer allocated by pmcraid_alloc_sglist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) * 0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) static int pmcraid_copy_sglist(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) struct pmcraid_sglist *sglist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) void __user *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) u32 len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) int direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) int bsize_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) /* Determine the actual number of bytes per element */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) bsize_elem = PAGE_SIZE * (1 << sglist->order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) sg = sglist->scatterlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) struct page *page = sg_page(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) kaddr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) if (direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) rc = copy_from_user(kaddr, buffer, bsize_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) rc = copy_to_user(buffer, kaddr, bsize_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) pmcraid_err("failed to copy user data into sg list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) sg->length = bsize_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285)
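/* copy any remaining partial element (len is not a multiple of bsize_elem) */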
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) if (len % bsize_elem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) struct page *page = sg_page(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) kaddr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) if (direction == DMA_TO_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) rc = copy_from_user(kaddr, buffer, len % bsize_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) rc = copy_to_user(buffer, kaddr, len % bsize_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) sg->length = len % bsize_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) pmcraid_err("failed to copy user data into sg list\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) * pmcraid_queuecommand - Queue a mid-layer request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311) * @scsi_cmd: scsi command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) * @done: done function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) * This function queues a request generated by the mid-layer. The mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) * calls this routine with host->lock held. Some of the functions called by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) * queuecommand take the cmd block queue locks (free_pool_lock and pending_pool_lock)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) * 0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) * SCSI_MLQUEUE_HOST_BUSY if host is busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) static int pmcraid_queuecommand_lck(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) struct scsi_cmnd *scsi_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) void (*done) (struct scsi_cmnd *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) struct pmcraid_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) struct pmcraid_ioarcb *ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) struct pmcraid_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332) u32 fw_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3333) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) pinstance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) (struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) scsi_cmd->scsi_done = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) scsi_cmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) /* if adapter is marked as dead, set result to DID_NO_CONNECT and complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) * the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345) if (pinstance->ioa_state == IOA_STATE_DEAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) scsi_cmd->result = (DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) /* If IOA reset is in progress, can't queue the commands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353) if (pinstance->ioa_reset_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) /* Firmware doesn't support SYNCHRONIZE_CACHE command (0x35), so complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) * the command here in the driver itself and return success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) if (scsi_cmd->cmnd[0] == SYNCHRONIZE_CACHE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360) pmcraid_info("SYNC_CACHE(0x35), completing in driver itself\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) /* initialize the command and IOARCB to be sent to IOA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) cmd = pmcraid_get_free_cmd(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) if (cmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) pmcraid_err("free command block is not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) cmd->scsi_cmd = scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) ioarcb = &(cmd->ioa_cb->ioarcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) ioarcb->resource_handle = res->cfg_entry.resource_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377) ioarcb->request_type = REQ_TYPE_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) /* set hrrq number where the IOA should respond to. Note that all cmds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) * generated internally use hrrq_id 0; the exception is the cmd block of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) * scsi_cmd that is re-used (e.g. cancel/abort), which uses the hrrq_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) * assigned here in queuecommand
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384) ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) pinstance->num_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386) cmd->cmd_done = pmcraid_io_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) if (scsi_cmd->underflow == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) if (res->sync_reqd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) ioarcb->request_flags0 |= SYNC_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) res->sync_reqd = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) ioarcb->request_flags0 |= NO_LINK_DESCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) if (scsi_cmd->flags & SCMD_TAGGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) ioarcb->request_flags1 |= TASK_TAG_SIMPLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) if (RES_IS_GSCSI(res->cfg_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) ioarcb->request_flags1 |= DELAY_AFTER_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) rc = pmcraid_build_ioadl(pinstance, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) le32_to_cpu(ioarcb->response_handle) >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) scsi_cmd->cmnd[0], pinstance->host->unique_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) PMCRAID_PHYS_BUS_ID,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413) RES_IS_VSET(res->cfg_entry) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) (fw_version <= PMCRAID_FW_VERSION_1 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) res->cfg_entry.unique_flags1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) le16_to_cpu(res->cfg_entry.array_id) & 0xFF) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) RES_TARGET(res->cfg_entry.resource_address),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) RES_LUN(res->cfg_entry.resource_address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) if (likely(rc == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) _pmcraid_fire_command(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3423) pmcraid_err("queuecommand could not build ioadl\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3424) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) rc = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430)
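/*
* DEF_SCSI_QCMD() generates the pmcraid_queuecommand() wrapper that the SCSI
* mid-layer calls; in this kernel the wrapper takes the host lock and then
* invokes pmcraid_queuecommand_lck() above.
*/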
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) static DEF_SCSI_QCMD(pmcraid_queuecommand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) * pmcraid_chr_open - char node "open" entry point, allowed only for users with admin access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3436) static int pmcraid_chr_open(struct inode *inode, struct file *filep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3437) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3438) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3439)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3440) if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3441) return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) /* Populate the adapter instance pointer for use by ioctl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) filep->private_data = pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) * pmcraid_chr_fasync - Async notifier registration from applications
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) * This function adds the calling process to a driver global queue. When an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454) * event occurs, SIGIO will be sent to all processes in this queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) pinstance = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462) mutex_lock(&pinstance->aen_queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464) mutex_unlock(&pinstance->aen_queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) * commands sent over IOCTL interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474) * @cmd : pointer to struct pmcraid_cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) * @buflen : length of the request buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) * @direction : data transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) * 0 on success, non-zero error code on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) static int pmcraid_build_passthrough_ioadls(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482) struct pmcraid_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) int buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) int direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3487) struct pmcraid_sglist *sglist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) struct scatterlist *sg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490) struct pmcraid_ioadl_desc *ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) sglist = pmcraid_alloc_sglist(buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495) if (!sglist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) pmcraid_err("can't allocate memory for passthrough SGls\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) sglist->scatterlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) sglist->num_sg, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) dev_err(&cmd->drv_inst->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) "Failed to map passthrough buffer!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) pmcraid_free_sglist(sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) cmd->sglist = sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) ioarcb->request_flags0 |= NO_LINK_DESCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514) ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) /* Initialize IOADL descriptor addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518) ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) ioadl[i].flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523) /* setup the last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531) * pmcraid_release_passthrough_ioadls - release passthrough ioadls
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) * @buflen: size of the request buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) * @direction: data transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) static void pmcraid_release_passthrough_ioadls(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) struct pmcraid_cmd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) int buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543) int direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) struct pmcraid_sglist *sglist = cmd->sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) if (buflen > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) dma_unmap_sg(&cmd->drv_inst->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550) sglist->scatterlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) sglist->num_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) pmcraid_free_sglist(sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) cmd->sglist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) * @ioctl_cmd: ioctl code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) * @buflen: length of the user buffer
* @arg: pointer to pmcraid_passthrough_ioctl_buffer user buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) * 0 on success, non-zero error code on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) static long pmcraid_ioctl_passthrough(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) struct pmcraid_instance *pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570) unsigned int ioctl_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) unsigned int buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) void __user *arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) struct pmcraid_passthrough_ioctl_buffer *buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) struct pmcraid_ioarcb *ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) struct pmcraid_cmd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) struct pmcraid_cmd *cancel_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) void __user *request_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) unsigned long request_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) void __user *ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3583) u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3584) int request_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585) int buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) u8 direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) /* If IOA reset is in progress, wait 10 secs for reset to complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) if (pinstance->ioa_reset_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) rc = wait_event_interruptible_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) pinstance->reset_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) !pinstance->ioa_reset_in_progress,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594) msecs_to_jiffies(10000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) else if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599) return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) /* If adapter is not in operational state, return error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) pmcraid_err("IOA is not operational\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) buffer = kmalloc(buffer_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) if (!buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612) pmcraid_err("no memory for passthrough buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3613) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) request_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618)
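/* the request data follows the ioctl header inside the user's
* pmcraid_passthrough_ioctl_buffer, request_offset bytes from its start
*/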
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) request_buffer = arg + request_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621) rc = copy_from_user(buffer, arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) sizeof(struct pmcraid_passthrough_ioctl_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623)
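/* ioasa points at the IOASA area inside the user buffer; the completion
* status is copied back there at out_handle_response
*/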
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) pmcraid_err("ioctl: can't copy passthrough buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) direction = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637) direction = DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) if (request_size < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) /* check if we have any additional command parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) > PMCRAID_ADD_CMD_PARAM_LEN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) cmd = pmcraid_get_free_cmd(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) if (!cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) pmcraid_err("free command block is not available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3657) goto out_free_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3660) cmd->scsi_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3661) ioarcb = &(cmd->ioa_cb->ioarcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3663) /* Copy the user-provided IOARCB contents field by field */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) ioarcb->resource_handle = buffer->ioarcb.resource_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) ioarcb->request_type = buffer->ioarcb.request_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670) memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) if (buffer->ioarcb.add_cmd_param_length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) ioarcb->add_cmd_param_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674) buffer->ioarcb.add_cmd_param_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) ioarcb->add_cmd_param_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) buffer->ioarcb.add_cmd_param_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) memcpy(ioarcb->add_data.u.add_cmd_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) buffer->ioarcb.add_data.u.add_cmd_params,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) le16_to_cpu(buffer->ioarcb.add_cmd_param_length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) /* set hrrq number where the IOA should respond to. Note that all cmds
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) * generated internally use hrrq_id 0; the exception is the cmd block of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684) * scsi_cmd that is re-used (e.g. cancel/abort), which uses the hrrq_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) * assigned here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) pinstance->num_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) if (request_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) rc = pmcraid_build_passthrough_ioadls(cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) request_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) pmcraid_err("couldn't build passthrough ioadls\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) goto out_free_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) /* If data is being written into the device, copy the data from user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) * buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) if (direction == DMA_TO_DEVICE && request_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704) rc = pmcraid_copy_sglist(cmd->sglist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) request_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) request_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709) pmcraid_err("failed to copy user buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) goto out_free_sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) /* passthrough ioctl is a blocking command, so put the caller to sleep until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) * completion or timeout. Note that a timeout value of 0 means no timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) cmd->cmd_done = pmcraid_internal_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) init_completion(&cmd->wait_for_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) cmd->completion_req = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) cmd->ioa_cb->ioarcb.cdb[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724) le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) _pmcraid_fire_command(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) /* NOTE! Remove the line below once abort_task is implemented
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) * in firmware. This line disables the ioctl command timeout handling logic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732) * (similar to IO command timeout handling), making ioctl commands wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) * until command completion regardless of the timeout value specified in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734) * the ioarcb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3735) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) buffer->ioarcb.cmd_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) /* If a command timeout is specified, make the caller wait only that long;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) * otherwise the wait is blocking. If the command times out, it will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) * aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) if (buffer->ioarcb.cmd_timeout == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) wait_for_completion(&cmd->wait_for_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) } else if (!wait_for_completion_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) &cmd->wait_for_completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746) msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748) pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) cmd->ioa_cb->ioarcb.cdb[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) cancel_cmd = pmcraid_abort_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) if (cancel_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) wait_for_completion(&cancel_cmd->wait_for_completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) pmcraid_return_cmd(cancel_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) /* if abort task couldn't find the command, i.e. it completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) * prior to aborting, return good completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) * if the command got aborted successfully, or there was an IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) * reset because abort task itself timed out, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) * return -ETIMEDOUT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769) if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) rc = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) goto out_handle_response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) /* if there was no command block for abort task, or abort task failed to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) * abort the IOARCB, wait for 150 more seconds and initiate the reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3777) * sequence after that timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3778) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) if (!wait_for_completion_timeout(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) &cmd->wait_for_completion,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) msecs_to_jiffies(150 * 1000))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) pmcraid_reset_bringup(cmd->drv_inst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) rc = -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) out_handle_response:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) /* copy the entire IOASA buffer and return IOCTL success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) * If copying the IOASA to the user buffer fails, return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) * -EFAULT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) sizeof(struct pmcraid_ioasa))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3794) pmcraid_err("failed to copy ioasa buffer to user\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) /* If the data transfer was from device, copy the data onto user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) * buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801) else if (direction == DMA_FROM_DEVICE && request_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) rc = pmcraid_copy_sglist(cmd->sglist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) request_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) request_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) pmcraid_err("failed to copy user buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) out_free_sglist:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) out_free_cmd:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816) pmcraid_return_cmd(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) out_free_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) kfree(buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) * @cmd: ioctl command passed in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) * @buflen: length of user_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) * @user_buffer: user buffer pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) * 0 in case of success, otherwise appropriate error code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) static long pmcraid_ioctl_driver(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839) struct pmcraid_instance *pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) unsigned int buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) void __user *user_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) int rc = -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) case PMCRAID_IOCTL_RESET_ADAPTER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) pmcraid_reset_bringup(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861) * pmcraid_check_ioctl_buffer - check for proper access to user buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) * @cmd: ioctl command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) * @arg: user buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) * @hdr: pointer to kernel memory for pmcraid_ioctl_header
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) * Return Value
 *   negative error code if there are access issues, otherwise zero.
 *   Upon success, the ioctl header is copied from the user buffer into @hdr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872) static int pmcraid_check_ioctl_buffer(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873) int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874) void __user *arg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875) struct pmcraid_ioctl_header *hdr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) pmcraid_err("couldn't copy ioctl header from user buffer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) /* check for valid driver signature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) rc = memcmp(hdr->signature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) PMCRAID_IOCTL_SIGNATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) sizeof(hdr->signature));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) pmcraid_err("signature verification failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896)
/**
 * pmcraid_chr_ioctl - char node ioctl entry point
 * @filep: file structure for the character device
 * @cmd: ioctl command
 * @arg: user buffer containing a pmcraid_ioctl_header followed by any payload
 *
 * Return Value
 *   0 in case of success, otherwise appropriate error code
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) static long pmcraid_chr_ioctl(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) struct file *filep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) unsigned long arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) struct pmcraid_instance *pinstance = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) struct pmcraid_ioctl_header *hdr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908) void __user *argp = (void __user *)arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) int retval = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911) hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) if (!hdr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) pmcraid_err("failed to allocate memory for ioctl header\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) retval = pmcraid_check_ioctl_buffer(cmd, argp, hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) if (retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) pmcraid_info("chr_ioctl: header check failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) kfree(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) pinstance = filep->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) if (!pinstance) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) pmcraid_info("adapter instance is not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) kfree(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) switch (_IOC_TYPE(cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) case PMCRAID_PASSTHROUGH_IOCTL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) /* If ioctl code is to download microcode, we need to block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938) * mid-layer requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) scsi_block_requests(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943) retval = pmcraid_ioctl_passthrough(pinstance, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) hdr->buffer_length, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) scsi_unblock_requests(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) case PMCRAID_DRIVER_IOCTL:
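		/* Any driver-private payload would follow the ioctl header in
		 * the user buffer; the only driver ioctl handled today
		 * (PMCRAID_IOCTL_RESET_ADAPTER) carries no payload.
		 */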
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) arg += sizeof(struct pmcraid_ioctl_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952) retval = pmcraid_ioctl_driver(pinstance, cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) hdr->buffer_length, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) retval = -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) kfree(hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965)
/*
 * File operations structure for management interface
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) static const struct file_operations pmcraid_fops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) .owner = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971) .open = pmcraid_chr_open,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) .fasync = pmcraid_chr_fasync,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) .unlocked_ioctl = pmcraid_chr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) .compat_ioctl = compat_ptr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) .llseek = noop_llseek,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) * pmcraid_show_log_level - Display adapter's error logging level
 * @dev: class device struct
 * @attr: device attribute (unused)
 * @buf: buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) * number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) static ssize_t pmcraid_show_log_level(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) struct pmcraid_instance *pinstance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) (struct pmcraid_instance *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) * pmcraid_store_log_level - Change the adapter's error logging level
 * @dev: class device struct
 * @attr: device attribute (unused)
 * @buf: buffer
 * @count: not used
 *
 * Return value:
 *  number of bytes consumed from buffer on success, negative error otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) static ssize_t pmcraid_store_log_level(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) const char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) size_t count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) struct Scsi_Host *shost;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) if (kstrtou8(buf, 10, &val))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) /* log-level should be from 0 to 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) if (val > 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) pinstance = (struct pmcraid_instance *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) pinstance->current_log_level = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030) return strlen(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033) static struct device_attribute pmcraid_log_level_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) .attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) .name = "log_level",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) .mode = S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) .show = pmcraid_show_log_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) .store = pmcraid_store_log_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) * pmcraid_show_drv_version - Display driver version
 * @dev: class device struct
 * @attr: device attribute (unused)
 * @buf: buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) * number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050) static ssize_t pmcraid_show_drv_version(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) char *buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) return snprintf(buf, PAGE_SIZE, "version: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) PMCRAID_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) static struct device_attribute pmcraid_driver_version_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061) .attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) .name = "drv_version",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) .show = pmcraid_show_drv_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) /**
 * pmcraid_show_adapter_id - Display driver assigned adapter id
 * @dev: class device struct
 * @attr: device attribute (unused)
 * @buf: buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) * number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) static ssize_t pmcraid_show_adapter_id(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) char *buf
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) struct pmcraid_instance *pinstance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) (struct pmcraid_instance *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) u32 adapter_id = (pinstance->pdev->bus->number << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) pinstance->pdev->devfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087) u32 aen_group = pmcraid_event_family.id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) return snprintf(buf, PAGE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) "adapter id: %d\nminor: %d\naen group: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) adapter_id, MINOR(pinstance->cdev.dev), aen_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) static struct device_attribute pmcraid_adapter_id_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095) .attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) .name = "adapter_id",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) .mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) .show = pmcraid_show_adapter_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) static struct device_attribute *pmcraid_host_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) &pmcraid_log_level_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) &pmcraid_driver_version_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105) &pmcraid_adapter_id_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) };
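/*
 * These host attributes appear in the SCSI host's sysfs directory once the
 * host is registered, for example (the host number is assigned at probe
 * time and is illustrative here):
 *   cat /sys/class/scsi_host/host0/log_level
 *   echo 2 > /sys/class/scsi_host/host0/log_level
 *   cat /sys/class/scsi_host/host0/drv_version
 */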
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) /* host template structure for pmcraid driver */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) static struct scsi_host_template pmcraid_host_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) .name = PMCRAID_DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) .queuecommand = pmcraid_queuecommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) .eh_abort_handler = pmcraid_eh_abort_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) .eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) .eh_target_reset_handler = pmcraid_eh_target_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) .eh_device_reset_handler = pmcraid_eh_device_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) .eh_host_reset_handler = pmcraid_eh_host_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) .slave_alloc = pmcraid_slave_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) .slave_configure = pmcraid_slave_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) .slave_destroy = pmcraid_slave_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) .change_queue_depth = pmcraid_change_queue_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) .can_queue = PMCRAID_MAX_IO_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) .this_id = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) .sg_tablesize = PMCRAID_MAX_IOADLS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) .max_sectors = PMCRAID_IOA_MAX_SECTORS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) .no_write_same = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) .cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) .shost_attrs = pmcraid_host_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) .proc_name = PMCRAID_DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) * pmcraid_isr_msix - implements MSI-X interrupt handling routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) * @irq: interrupt vector number
 * @dev_id: pointer to hrrq_vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4139) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) static irqreturn_t pmcraid_isr_msix(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4146) struct pmcraid_isr_param *hrrq_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) u32 intrs_val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) int hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) hrrq_vector = (struct pmcraid_isr_param *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) hrrq_id = hrrq_vector->hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) pinstance = hrrq_vector->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155)
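	/* Only the first vector (hrrq_id 0) carries IOA error and doorbell
	 * interrupts; the remaining vectors signal HRRQ completions only,
	 * which are handled by the tasklet scheduled below.
	 */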
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) if (!hrrq_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) /* Read the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) intrs_val = pmcraid_read_interrupts(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) if (intrs_val &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) ((ioread32(pinstance->int_regs.host_ioa_interrupt_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161) & DOORBELL_INTR_MSIX_CLR) == 0)) {
			/* Any error interrupts, including unit_check,
			 * initiate an IOA reset. In case of a unit check,
			 * indicate to the reset sequence that the IOA unit
			 * checked and prepare for a dump during the reset
			 * sequence.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167) if (intrs_val & PMCRAID_ERROR_INTERRUPTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (intrs_val & INTRS_IOA_UNIT_CHECK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) pinstance->ioa_unit_check = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170)
				pmcraid_err("ISR: error interrupts: %x initiating reset\n",
					    intrs_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) spin_lock_irqsave(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) pmcraid_initiate_reset(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) spin_unlock_irqrestore(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) }
			/* If the interrupt was part of the ioa initialization,
			 * clear it. Delete the timer and wake up the
			 * reset engine to proceed with the reset sequence.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) if (intrs_val & INTRS_TRANSITION_TO_OPERATIONAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) pmcraid_clr_trans_op(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186)
			/* Clear the interrupt register by writing
			 * to the host-to-IOA doorbell. Once done,
			 * FW will clear the interrupt.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) iowrite32(DOORBELL_INTR_MSIX_CLR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192) pinstance->int_regs.host_ioa_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) * pmcraid_isr - implements legacy interrupt handling routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) * @irq: interrupt vector number
 * @dev_id: pointer to hrrq_vector
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) * IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) static irqreturn_t pmcraid_isr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) struct pmcraid_isr_param *hrrq_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) u32 intrs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) unsigned long lock_flags;
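	/* legacy/shared interrupt mode provides a single vector, so only
	 * HRRQ 0 is serviced from this handler
	 */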
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219) int hrrq_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221) /* In case of legacy interrupt mode where interrupts are shared across
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) * isrs, it may be possible that the current interrupt is not from IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224) if (!dev_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) hrrq_vector = (struct pmcraid_isr_param *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) pinstance = hrrq_vector->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) intrs = pmcraid_read_interrupts(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235)
	/* Any error interrupts, including unit_check, initiate an IOA reset.
	 * In case of a unit check, indicate to the reset sequence that the
	 * IOA unit checked and prepare for a dump during the reset sequence.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) if (intrs & PMCRAID_ERROR_INTERRUPTS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) if (intrs & INTRS_IOA_UNIT_CHECK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) pinstance->ioa_unit_check = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) iowrite32(intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) pinstance->int_regs.ioa_host_interrupt_clr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) pmcraid_err("ISR: error interrupts: %x initiating reset\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248) intrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) intrs = ioread32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) pinstance->int_regs.ioa_host_interrupt_clr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) pmcraid_initiate_reset(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) } else {
		/* If the interrupt was part of the ioa initialization,
		 * clear it. Delete the timer and wake up the
		 * reset engine to proceed with the reset sequence.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) pmcraid_clr_trans_op(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) iowrite32(intrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) pinstance->int_regs.ioa_host_interrupt_clr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) ioread32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) pinstance->int_regs.ioa_host_interrupt_clr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) tasklet_schedule(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) &(pinstance->isr_tasklet[hrrq_id]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) * pmcraid_worker_function - worker thread function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) *
 * @workp: pointer to struct work_struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) static void pmcraid_worker_function(struct work_struct *workp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) struct pmcraid_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) struct pmcraid_resource_entry *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) unsigned long host_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293) u16 fw_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) u8 bus, target, lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) pinstance = container_of(workp, struct pmcraid_instance, worker_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) /* add resources only after host is added into system */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) if (!atomic_read(&pinstance->expose_resources))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) sdev = res->scsi_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) /* host_lock must be held before calling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) * scsi_device_get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) spin_lock_irqsave(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) host_lock_flags);
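			/* scsi_device_get() returns 0 on success; a failure
			 * here means the device is already being removed
			 * from the midlayer
			 */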
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314) if (!scsi_device_get(sdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) spin_unlock_irqrestore(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317) host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) pmcraid_info("deleting %x from midlayer\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) res->cfg_entry.resource_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) list_move_tail(&res->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) &pinstance->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) spin_unlock_irqrestore(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) &pinstance->resource_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4324) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4325) scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) spin_lock_irqsave(&pinstance->resource_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) res->change_detected = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331) spin_unlock_irqrestore(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) list_for_each_entry(res, &pinstance->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) if (res->change_detected == RES_CHANGE_ADD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) if (!pmcraid_expose_resource(fw_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) &res->cfg_entry))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) if (RES_IS_VSET(res->cfg_entry)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) bus = PMCRAID_VSET_BUS_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) if (fw_version <= PMCRAID_FW_VERSION_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) target = res->cfg_entry.unique_flags1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) target = le16_to_cpu(res->cfg_entry.array_id) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) lun = PMCRAID_VSET_LUN_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) bus = PMCRAID_PHYS_BUS_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) target =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) RES_TARGET(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) res->cfg_entry.resource_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358) lun = RES_LUN(res->cfg_entry.resource_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) res->change_detected = 0;
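			/* scsi_add_device() may sleep, so drop the resource
			 * spinlock around the call and re-acquire it after
			 */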
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) spin_unlock_irqrestore(&pinstance->resource_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) scsi_add_device(pinstance->host, bus, target, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365) spin_lock_irqsave(&pinstance->resource_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374) * pmcraid_tasklet_function - Tasklet function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) *
 * @instance: pointer to the hrrq vector's isr_param structure, cast to unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) static void pmcraid_tasklet_function(unsigned long instance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) struct pmcraid_isr_param *hrrq_vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) unsigned long hrrq_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) unsigned long pending_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387) unsigned long host_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) spinlock_t *lockp; /* hrrq buffer lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) u32 resp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) hrrq_vector = (struct pmcraid_isr_param *)instance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393) pinstance = hrrq_vector->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) id = hrrq_vector->hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395) lockp = &(pinstance->hrrq_lock[id]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) /* loop through each of the commands responded by IOA. Each HRRQ buf is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) * protected by its own lock. Traversals must be done within this lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) * as there may be multiple tasklets running on multiple CPUs. Note
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) * that the lock is held just for picking up the response handle and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) * manipulating hrrq_curr/toggle_bit values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) spin_lock_irqsave(lockp, hrrq_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) while ((resp & HRRQ_TOGGLE_BIT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) pinstance->host_toggle_bit[id]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409)
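		/* bits [31:2] of the response carry the command block index;
		 * the low-order bits hold the toggle bit checked above
		 */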
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) int cmd_index = resp >> 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) struct pmcraid_cmd *cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412)
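		/* advance to the next HRRQ entry, wrapping to the start of
		 * the ring and flipping the expected toggle bit at the end
		 */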
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) pinstance->hrrq_curr[id]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417) pinstance->host_toggle_bit[id] ^= 1u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) if (cmd_index >= PMCRAID_MAX_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) /* In case of invalid response handle, log message */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) pmcraid_err("Invalid response handle %d\n", cmd_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427) cmd = pinstance->cmd_list[cmd_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) spin_unlock_irqrestore(lockp, hrrq_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) spin_lock_irqsave(&pinstance->pending_pool_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) pending_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) list_del(&cmd->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433) spin_unlock_irqrestore(&pinstance->pending_pool_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) pending_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) del_timer(&cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) atomic_dec(&pinstance->outstanding_cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437)
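		/* pmcraid_ioa_reset() manipulates reset state and must run
		 * with the host lock held; other completion handlers run
		 * unlocked
		 */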
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438) if (cmd->cmd_done == pmcraid_ioa_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) spin_lock_irqsave(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) cmd->cmd_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) spin_unlock_irqrestore(pinstance->host->host_lock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) } else if (cmd->cmd_done != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) cmd->cmd_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) /* loop over until we are done with all responses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) spin_lock_irqsave(lockp, hrrq_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449) resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) spin_unlock_irqrestore(lockp, hrrq_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) /**
 * pmcraid_unregister_interrupt_handler - de-registers interrupt handlers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) * This routine un-registers registered interrupt handler and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) * also frees irqs/vectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) *
 * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466) void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) struct pci_dev *pdev = pinstance->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) for (i = 0; i < pinstance->num_hrrq; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) pinstance->interrupt_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) pci_free_irq_vectors(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4479) * pmcraid_register_interrupt_handler - registers interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) * @pinstance: pointer to per-adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) * 0 on success, non-zero error code otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) struct pci_dev *pdev = pinstance->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) unsigned int irq_flag = PCI_IRQ_LEGACY, flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) int num_hrrq, rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) irq_handler_t isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) if (pmcraid_enable_msix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) irq_flag |= PCI_IRQ_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495)
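	/* try MSI-X first when enabled; pci_alloc_irq_vectors() falls back
	 * to a single legacy vector otherwise
	 */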
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) num_hrrq = pci_alloc_irq_vectors(pdev, 1, PMCRAID_NUM_MSIX_VECTORS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) irq_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) if (num_hrrq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) return num_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) if (pdev->msix_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) flag = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) isr = pmcraid_isr_msix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) flag = IRQF_SHARED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) isr = pmcraid_isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) for (i = 0; i < num_hrrq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) struct pmcraid_isr_param *vec = &pinstance->hrrq_vector[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) vec->hrrq_id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) vec->drv_inst = pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) rc = request_irq(pci_irq_vector(pdev, i), isr, flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) PMCRAID_DRIVER_NAME, vec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) goto out_unwind;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) pinstance->num_hrrq = num_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) if (pdev->msix_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) pinstance->interrupt_mode = 1;
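		/* switch the IOA to MSI-X interrupt delivery; the read-back
		 * flushes the posted doorbell write
		 */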
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) iowrite32(DOORBELL_INTR_MODE_MSIX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) pinstance->int_regs.host_ioa_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) out_unwind:
	while (--i >= 0)	/* free every vector registered so far */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) pci_free_irq_vectors(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) /**
 * pmcraid_release_cmd_blocks - releases buffers allocated for command blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) * @pinstance: per adapter instance structure pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) * @max_index: number of buffer blocks to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549) for (i = 0; i < max_index; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) pinstance->cmd_list[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) kmem_cache_destroy(pinstance->cmd_cachep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) pinstance->cmd_cachep = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) /**
 * pmcraid_release_control_blocks - releases buffers allocated for control blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) * @max_index: number of buffers (from 0 onwards) to release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) * This function assumes that the command blocks for which control blocks are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) * linked are not released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) pmcraid_release_control_blocks(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) struct pmcraid_instance *pinstance,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) int max_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572) )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) if (pinstance->control_pool == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) for (i = 0; i < max_index; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) dma_pool_free(pinstance->control_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) pinstance->cmd_list[i]->ioa_cb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) pinstance->cmd_list[i]->ioa_cb_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) pinstance->cmd_list[i]->ioa_cb = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) dma_pool_destroy(pinstance->control_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587) pinstance->control_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591) * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
 * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594) * Allocates memory for command blocks using kernel slab allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) * 0 in case of success; -ENOMEM in case of failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) static int pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) pinstance->host->unique_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) pinstance->cmd_cachep = kmem_cache_create(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) pinstance->cmd_pool_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) sizeof(struct pmcraid_cmd), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610) SLAB_HWCACHE_ALIGN, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) if (!pinstance->cmd_cachep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) for (i = 0; i < PMCRAID_MAX_CMD; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) pinstance->cmd_list[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) if (!pinstance->cmd_list[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) pmcraid_release_cmd_blocks(pinstance, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625) /**
 * pmcraid_allocate_control_blocks - allocates memory for control blocks
 * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) * and IOASAs. This is called after command blocks are already allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) * 0 in case it can allocate all control blocks, otherwise -ENOMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) pinstance->host->unique_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641)
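	/* IOARCBs handed to the IOA must be aligned to
	 * PMCRAID_IOARCB_ALIGNMENT, hence the aligned DMA pool
	 */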
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) pinstance->control_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643) dma_pool_create(pinstance->ctl_pool_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) &pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) sizeof(struct pmcraid_control_block),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) PMCRAID_IOARCB_ALIGNMENT, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648) if (!pinstance->control_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) for (i = 0; i < PMCRAID_MAX_CMD; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) pinstance->cmd_list[i]->ioa_cb =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) dma_pool_zalloc(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) pinstance->control_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) &(pinstance->cmd_list[i]->ioa_cb_bus_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) if (!pinstance->cmd_list[i]->ioa_cb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) pmcraid_release_control_blocks(pinstance, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) * @maxindex: size of hrrq buffer pointer array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) for (i = 0; i < maxindex; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) dma_free_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) pinstance->hrrq_start[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) pinstance->hrrq_start_bus_addr[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) /* reset pointers and toggle bit to zeros */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686) pinstance->hrrq_start[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) pinstance->hrrq_start_bus_addr[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) pinstance->host_toggle_bit[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) * Return value
* 0 if hrrq buffers are allocated, -ENOMEM otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) int i, buffer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) for (i = 0; i < pinstance->num_hrrq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) pinstance->hrrq_start[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) dma_alloc_coherent(&pinstance->pdev->dev, buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) &pinstance->hrrq_start_bus_addr[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) if (!pinstance->hrrq_start[i]) {
pmcraid_err("dma_alloc_coherent failed for hrrq vector %d\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) pmcraid_release_host_rrqs(pinstance, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716)
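/* hrrq_end points at the last valid entry rather than one past it, and
 * host_toggle_bit starts at 1; the toggle bit is flipped whenever the
 * response queue wraps so that new firmware responses can be told
 * apart from stale entries.
 */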
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718) pinstance->hrrq_end[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) pinstance->host_toggle_bit[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) spin_lock_init(&pinstance->hrrq_lock[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) * pmcraid_release_hcams - release HCAM buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734) static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) if (pinstance->ccn.msg != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) dma_free_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) PMCRAID_AEN_HDR_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) sizeof(struct pmcraid_hcam_ccn_ext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) pinstance->ccn.msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) pinstance->ccn.baddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) pinstance->ccn.msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) pinstance->ccn.hcam = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) pinstance->ccn.baddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748) if (pinstance->ldn.msg != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) dma_free_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) PMCRAID_AEN_HDR_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) sizeof(struct pmcraid_hcam_ldn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) pinstance->ldn.msg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) pinstance->ldn.baddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) pinstance->ldn.msg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756) pinstance->ldn.hcam = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) pinstance->ldn.baddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762) * pmcraid_allocate_hcams - allocates HCAM buffers
* @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) * Return Value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) * 0 in case of successful allocation, non-zero otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) pinstance->ccn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) PMCRAID_AEN_HDR_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) sizeof(struct pmcraid_hcam_ccn_ext),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) &pinstance->ccn.baddr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) pinstance->ldn.msg = dma_alloc_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) PMCRAID_AEN_HDR_SIZE +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) sizeof(struct pmcraid_hcam_ldn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) &pinstance->ldn.baddr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780) if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) pmcraid_release_hcams(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) } else {
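/* Each HCAM buffer is an AEN header immediately followed by the HCAM
 * payload, so the payload pointers are simply the message buffers
 * offset by PMCRAID_AEN_HDR_SIZE.
 */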
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) pinstance->ccn.hcam =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) (void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) pinstance->ldn.hcam =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) (void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) atomic_set(&pinstance->ccn.ignore, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) atomic_set(&pinstance->ldn.ignore, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791)
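/* pmcraid_release_hcams() resets both msg pointers to NULL, so checking
 * ldn.msg alone is enough to detect that either allocation failed.
 */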
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) * pmcraid_release_config_buffers - release config.table buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) if (pinstance->cfg_table != NULL &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) pinstance->cfg_table_bus_addr != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) dma_free_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) sizeof(struct pmcraid_config_table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) pinstance->cfg_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) pinstance->cfg_table_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) pinstance->cfg_table = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) pinstance->cfg_table_bus_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) if (pinstance->res_entries != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) list_del(&pinstance->res_entries[i].queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) kfree(pinstance->res_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) pinstance->res_entries = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) pmcraid_release_hcams(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827) * pmcraid_allocate_config_buffers - allocates DMAable memory for config table
* @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) * 0 for successful allocation, -ENOMEM for any failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837) pinstance->res_entries =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) kcalloc(PMCRAID_MAX_RESOURCES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) sizeof(struct pmcraid_resource_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841)
if (!pinstance->res_entries) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) pmcraid_err("failed to allocate memory for resource table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) list_add_tail(&pinstance->res_entries[i].queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849) &pinstance->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) pinstance->cfg_table = dma_alloc_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) sizeof(struct pmcraid_config_table),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) &pinstance->cfg_table_bus_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855)
if (!pinstance->cfg_table) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) pmcraid_err("couldn't alloc DMA memory for config table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) pmcraid_release_config_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) if (pmcraid_allocate_hcams(pinstance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) pmcraid_err("could not alloc DMA memory for HCAMS\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) pmcraid_release_config_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872) * pmcraid_init_tasklets - registers tasklets for response handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873) *
* @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) for (i = 0; i < pinstance->num_hrrq; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) tasklet_init(&pinstance->isr_tasklet[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) pmcraid_tasklet_function,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) (unsigned long)&pinstance->hrrq_vector[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) * pmcraid_kill_tasklets - destroys tasklets registered for response handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) for (i = 0; i < pinstance->num_hrrq; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900) tasklet_kill(&pinstance->isr_tasklet[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904) * pmcraid_release_buffers - release per-adapter buffers allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) * @pinstance: pointer to adapter soft state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) pmcraid_release_config_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) if (pinstance->inq_data != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) dma_free_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) sizeof(struct pmcraid_inquiry_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921) pinstance->inq_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) pinstance->inq_data_baddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924) pinstance->inq_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) pinstance->inq_data_baddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) if (pinstance->timestamp_data != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929) dma_free_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) sizeof(struct pmcraid_timestamp_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) pinstance->timestamp_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) pinstance->timestamp_data_baddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) pinstance->timestamp_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935) pinstance->timestamp_data_baddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940) * pmcraid_init_buffers - allocates memory and initializes various structures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) * @pinstance: pointer to per adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) * This routine pre-allocates memory based on the type of block as below:
* cmdblocks(PMCRAID_MAX_CMD): kernel memory using the slab allocator,
* IOARCBs(PMCRAID_MAX_CMD): DMAable memory, using the dma pool allocator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946) * config-table entries : DMAable memory using dma_alloc_coherent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) * HostRRQs : DMAable memory, using dma_alloc_coherent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) * 0 in case all of the blocks are allocated, -ENOMEM otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955)
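/* Allocations below are attempted in order; each failure path unwinds
 * everything allocated so far before returning -ENOMEM.
 */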
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) if (pmcraid_allocate_host_rrqs(pinstance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) pmcraid_err("couldn't allocate memory for %d host rrqs\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) pinstance->num_hrrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) if (pmcraid_allocate_config_buffers(pinstance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) pmcraid_err("couldn't allocate memory for config buffers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) if (pmcraid_allocate_cmd_blocks(pinstance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) pmcraid_err("couldn't allocate memory for cmd blocks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) pmcraid_release_config_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) if (pmcraid_allocate_control_blocks(pinstance)) {
pmcraid_err("couldn't allocate memory for control blocks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) pmcraid_release_config_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) /* allocate DMAable memory for page D0 INQUIRY buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) pinstance->inq_data = dma_alloc_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) sizeof(struct pmcraid_inquiry_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) &pinstance->inq_data_baddr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987) if (pinstance->inq_data == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) pmcraid_err("couldn't allocate DMA memory for INQUIRY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) pmcraid_release_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) /* allocate DMAable memory for set timestamp data buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) pinstance->timestamp_data = dma_alloc_coherent(&pinstance->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) sizeof(struct pmcraid_timestamp_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) &pinstance->timestamp_data_baddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) if (pinstance->timestamp_data == NULL) {
pmcraid_err("couldn't allocate DMA memory for set timestamp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) pmcraid_release_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) /* Initialize all the command blocks and add them to free pool. No
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) * need to lock (free_pool_lock) as this is done in initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) * itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) for (i = 0; i < PMCRAID_MAX_CMD; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011) struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) pmcraid_init_cmdblk(cmdp, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) cmdp->drv_inst = pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014) list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) * pmcraid_reinit_buffers - resets various buffer pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) * @pinstance: pointer to adapter instance
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) for (i = 0; i < pinstance->num_hrrq; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) memset(pinstance->hrrq_start[i], 0, buffer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) pinstance->hrrq_end[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035) pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) pinstance->host_toggle_bit[i] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) * pmcraid_init_instance - initialize per instance data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) * @pdev: pointer to pci device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) * @host: pointer to Scsi_Host structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044) * @mapped_pci_addr: memory mapped IOA configuration registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) * 0 on success, non-zero in case of any failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) void __iomem *mapped_pci_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) struct pmcraid_instance *pinstance =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) (struct pmcraid_instance *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) pinstance->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) pinstance->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) /* Initialize register addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) pinstance->mapped_dma_addr = mapped_pci_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) /* Initialize chip-specific details */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) pint_regs->ioa_host_interrupt_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) mapped_pci_addr + chip_cfg->ioa_host_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) pint_regs->ioa_host_interrupt_clr_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) pint_regs->ioa_host_msix_interrupt_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) mapped_pci_addr + chip_cfg->ioa_host_msix_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) pint_regs->host_ioa_interrupt_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) mapped_pci_addr + chip_cfg->host_ioa_intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076) pint_regs->host_ioa_interrupt_clr_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) /* Current version of firmware exposes interrupt mask set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) * and mask clr registers through memory mapped bar0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) pint_regs->ioa_host_interrupt_mask_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) mapped_pci_addr + chip_cfg->ioa_host_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) pint_regs->ioa_host_interrupt_mask_clr_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) pint_regs->global_interrupt_mask_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) mapped_pci_addr + chip_cfg->global_intr_mask;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) pinstance->ioa_reset_attempts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) init_waitqueue_head(&pinstance->reset_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) atomic_set(&pinstance->outstanding_cmds, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) atomic_set(&pinstance->last_message_id, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) atomic_set(&pinstance->expose_resources, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) INIT_LIST_HEAD(&pinstance->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) INIT_LIST_HEAD(&pinstance->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) INIT_LIST_HEAD(&pinstance->free_cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102) INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) spin_lock_init(&pinstance->free_pool_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) spin_lock_init(&pinstance->pending_pool_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) spin_lock_init(&pinstance->resource_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) mutex_init(&pinstance->aen_queue_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) /* Work-queue (Shared) for deferred processing error handling */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) /* Initialize the default log_level */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) pinstance->current_log_level = pmcraid_log_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) /* Setup variables required for reset engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) pinstance->ioa_state = IOA_STATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) pinstance->reset_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) * pmcraid_shutdown - shutdown adapter controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) * @pdev: pci device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) *
* Issues an adapter shutdown to the card and waits for its completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) static void pmcraid_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) pmcraid_reset_bringdown(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) * pmcraid_get_minor - returns unused minor number from minor number bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140) static unsigned short pmcraid_get_minor(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) int minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143)
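/* Minor numbers are tracked in a static bitmap. __set_bit() is the
 * non-atomic variant, so this assumes allocation and release do not
 * race (i.e. they happen only from serialized probe/remove paths).
 */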
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) minor = find_first_zero_bit(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) __set_bit(minor, pmcraid_minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146) return minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) /**
* pmcraid_release_minor - releases given minor back to minor number bitmap
* @minor: minor number to be released
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) static void pmcraid_release_minor(unsigned short minor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) __clear_bit(minor, pmcraid_minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) * pmcraid_setup_chrdev - allocates a minor number and registers a char device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) * @pinstance: pointer to adapter instance for which to register device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) * 0 in case of success, otherwise non-zero
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) int minor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) minor = pmcraid_get_minor();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) cdev_init(&pinstance->cdev, &pmcraid_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) pinstance->cdev.owner = THIS_MODULE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173)
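/* cdev_add() makes the management device live right away; the
 * device_create() call below then exposes it to udev as the
 * PMCRAID_DEVFILE<minor> node.
 */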
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) pmcraid_release_minor(minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) NULL, "%s%u", PMCRAID_DEVFILE, minor);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) * pmcraid_release_chrdev - unregisters per-adapter management interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) * @pinstance: pointer to adapter instance structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) pmcraid_release_minor(MINOR(pinstance->cdev.dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) device_destroy(pmcraid_class,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) cdev_del(&pinstance->cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) * pmcraid_remove - IOA hot plug remove entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) * @pdev: pci device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) static void pmcraid_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211) /* remove the management interface (/dev file) for this device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) pmcraid_release_chrdev(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) /* remove host template from scsi midlayer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215) scsi_remove_host(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) /* block requests from mid-layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) scsi_block_requests(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220) /* initiate shutdown adapter */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) pmcraid_shutdown(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) pmcraid_disable_interrupts(pinstance, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) flush_work(&pinstance->worker_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) pmcraid_kill_tasklets(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) pmcraid_unregister_interrupt_handler(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) pmcraid_release_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) iounmap(pinstance->mapped_dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) scsi_host_put(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) * pmcraid_suspend - driver suspend entry point for power management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) * @pdev: PCI device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) * @state: PCI power state to suspend routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) * Return Value - 0 always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) pmcraid_shutdown(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) pmcraid_disable_interrupts(pinstance, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) pmcraid_kill_tasklets(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252) pci_set_drvdata(pinstance->pdev, pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) pmcraid_unregister_interrupt_handler(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) pci_set_power_state(pdev, pci_choose_state(pdev, state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) /**
* pmcraid_resume - driver resume entry point for PCI power management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) * @pdev: PCI device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265) * Return Value - 0 in case of success. Error code in case of any failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267) static int pmcraid_resume(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) struct Scsi_Host *host = pinstance->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) pci_set_power_state(pdev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) pci_enable_wake(pdev, PCI_D0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280) dev_err(&pdev->dev, "resume: Enable device failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285)
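/* Prefer a 64-bit streaming DMA mask when dma_addr_t is wide enough and
 * the device accepts it, falling back to 32 bits otherwise; the
 * coherent mask stays at 32 bits in either case.
 */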
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286) if (sizeof(dma_addr_t) == 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) dev_err(&pdev->dev, "resume: Failed to set PCI DMA mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) goto disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298) pmcraid_disable_interrupts(pinstance, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299) atomic_set(&pinstance->outstanding_cmds, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) rc = pmcraid_register_interrupt_handler(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) "resume: couldn't register interrupt handlers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) goto release_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) pmcraid_init_tasklets(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) /* Start with hard reset sequence which brings up IOA to operational
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) * state as well as completes the reset sequence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) pinstance->ioa_hard_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) /* Start IOA firmware initialization and bring card to Operational
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) if (pmcraid_reset_bringup(pinstance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) dev_err(&pdev->dev, "couldn't initialize IOA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) goto release_tasklets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) release_tasklets:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) pmcraid_disable_interrupts(pinstance, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) pmcraid_kill_tasklets(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) pmcraid_unregister_interrupt_handler(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333) release_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) disable_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) #define pmcraid_suspend NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) #define pmcraid_resume NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) #endif /* CONFIG_PM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350) * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) * completion of the ioa reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) * @cmd: pointer to reset command block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358)
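/* The reset engine (pmcraid_ioa_reset) runs under the host lock; once
 * it completes, mid-layer requests are unblocked and the worker is
 * scheduled to (re)expose resources.
 */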
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) spin_lock_irqsave(pinstance->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) pmcraid_ioa_reset(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) spin_unlock_irqrestore(pinstance->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) scsi_unblock_requests(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) schedule_work(&pinstance->worker_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) * @cmd: pointer to pmcraid_cmd structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) * Return Value
* none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) pmcraid_reinit_cmdblk(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) ioarcb->request_type = REQ_TYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384) ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) /* If this was called as part of resource table reinitialization due to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) * lost CCN, it is enough to return the command block back to free pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388) * as part of set_supported_devs completion function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) if (cmd->drv_inst->reinit_cfg_table) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) cmd->drv_inst->reinit_cfg_table = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) cmd->release = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) cmd_done = pmcraid_reinit_cfgtable_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396) /* we will be done with the reset sequence after set supported devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) * setup the done function to return the command block back to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) * pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) pmcraid_send_cmd(cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) cmd_done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) PMCRAID_SET_SUP_DEV_TIMEOUT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) * pmcraid_set_timestamp - set the timestamp to IOAFP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410) * @cmd: pointer to pmcraid_cmd structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) * Return Value
* none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) __be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) struct pmcraid_ioadl_desc *ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) u64 timestamp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422)
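/* The timestamp is expressed in milliseconds since the Unix epoch and
 * packed least-significant byte first (48-bit little-endian) into the
 * data buffer sent to the adapter.
 */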
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) timestamp = ktime_get_real_seconds() * 1000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp) >> 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) pmcraid_reinit_cmdblk(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) ioarcb->request_type = REQ_TYPE_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
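/* cdb[6..9] carry the timestamp data length in big-endian form
 * (time_stamp_len was prepared with cpu_to_be32() above).
 */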
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) offsetof(struct pmcraid_ioarcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) add_data.u.ioadl[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) ioarcb->ioarcb_bus_addr &= cpu_to_le64(~(0x1FULL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) ioarcb->request_flags0 |= NO_LINK_DESCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) ioarcb->data_transfer_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) ioadl = &(ioarcb->add_data.u.ioadl[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) ioadl->flags = IOADL_FLAGS_LAST_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453)
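/* If an earlier SET TIMESTAMP attempt failed, let the command simply
 * complete via pmcraid_return_cmd; otherwise chain into
 * pmcraid_set_supported_devs to continue the bring-up sequence.
 */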
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454) if (!pinstance->timestamp_error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) pinstance->timestamp_error = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) pmcraid_send_cmd(cmd, pmcraid_return_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) * pmcraid_init_res_table - Initialize the resource table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) * @cmd: pointer to pmcraid command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470) * This function looks through the existing resource table, comparing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) * it with the config table. This function will take care of old/new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472) * devices and schedule adding/removing them from the mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) * as appropriate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) * Return value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) * None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) struct pmcraid_resource_entry *res, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) struct pmcraid_config_table_entry *cfgte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) int found, rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) u16 fw_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) LIST_HEAD(old_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) pmcraid_err("IOA requires microcode download\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) /* resource list is protected by pinstance->resource_lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) * init_res_table can be called from probe (user-thread) or runtime
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495) * reset (timer/tasklet)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498)
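/* Move every currently known resource onto a temporary list; entries
 * still present in the new config table are moved back to used_res_q
 * below, and whatever remains on old_res afterwards has disappeared
 * from the adapter.
 */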
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) list_move_tail(&res->queue, &old_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501)
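/* Firmware at or below PMCRAID_FW_VERSION_1 reports basic config
 * table entries; later firmware reports extended entries, which
 * presumably share the basic entry's initial layout (hence the cast
 * below).
 */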
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) for (i = 0; i < le16_to_cpu(pinstance->cfg_table->num_entries); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) if (be16_to_cpu(pinstance->inq_data->fw_version) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) PMCRAID_FW_VERSION_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505) cfgte = &pinstance->cfg_table->entries[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507) cfgte = (struct pmcraid_config_table_entry *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) &pinstance->cfg_table->entries_ext[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) if (!pmcraid_expose_resource(fw_version, cfgte))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) /* If this entry was already detected and initialized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) list_for_each_entry_safe(res, temp, &old_res, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) rc = memcmp(&res->cfg_entry.resource_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519) &cfgte->resource_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) sizeof(cfgte->resource_address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) list_move_tail(&res->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) &pinstance->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) /* If this is a new entry, initialize it and add it to the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) if (list_empty(&pinstance->free_res_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) pmcraid_err("Too many devices attached\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) res = list_entry(pinstance->free_res_q.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) struct pmcraid_resource_entry, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) res->scsi_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) res->change_detected = RES_CHANGE_ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) res->reset_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) list_move_tail(&res->queue, &pinstance->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) /* copy the new configuration table entry details into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) * driver-maintained resource entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) if (found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) memcpy(&res->cfg_entry, cfgte,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552) pinstance->config_table_entry_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) res->cfg_entry.resource_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) (fw_version <= PMCRAID_FW_VERSION_1 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556) res->cfg_entry.unique_flags1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) le16_to_cpu(res->cfg_entry.array_id) & 0xFF),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) le32_to_cpu(res->cfg_entry.resource_address));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) /* Detect any deleted entries, mark them for deletion from mid-layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) list_for_each_entry_safe(res, temp, &old_res, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) if (res->scsi_dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) res->change_detected = RES_CHANGE_DEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) res->cfg_entry.resource_handle =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) PMCRAID_INVALID_RES_HANDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569) list_move_tail(&res->queue, &pinstance->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571) list_move_tail(&res->queue, &pinstance->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) /* release the resource list lock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576) spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) pmcraid_set_timestamp(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) * pmcraid_querycfg - Send a Query IOA Config to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582) * @cmd: pointer pmcraid_cmd struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) * This function sends a Query IOA Configuration command to the adapter to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585) * retrieve the IOA configuration table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588) * none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593) struct pmcraid_ioadl_desc *ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) struct pmcraid_instance *pinstance = cmd->drv_inst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) __be32 cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596)
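/* Cache the entry size matching the firmware's config table layout;
 * pmcraid_init_res_table() uses it when copying entries into
 * driver-maintained resources.
 */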
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) if (be16_to_cpu(pinstance->inq_data->fw_version) <=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) PMCRAID_FW_VERSION_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) pinstance->config_table_entry_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) sizeof(struct pmcraid_config_table_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) pinstance->config_table_entry_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) sizeof(struct pmcraid_config_table_entry_ext);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) ioarcb->request_type = REQ_TYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) /* firmware requires 4-byte length field, specified in B.E format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613) /* Since the entire config table can be described by a single IOADL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) * it can be part of the IOARCB itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) offsetof(struct pmcraid_ioarcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) add_data.u.ioadl[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619) ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) ioarcb->ioarcb_bus_addr &= cpu_to_le64(~0x1FULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) ioarcb->request_flags0 |= NO_LINK_DESCS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) ioarcb->data_transfer_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) cpu_to_le32(sizeof(struct pmcraid_config_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626) ioadl = &(ioarcb->add_data.u.ioadl[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) ioadl->flags = IOADL_FLAGS_LAST_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631) pmcraid_send_cmd(cmd, pmcraid_init_res_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) * pmcraid_probe - PCI probe entry point for PMC MaxRAID controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) * @pdev: pointer to pci device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) * @dev_id: pointer to device ids structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) * Return Value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) * returns 0 if the device is claimed and successfully configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) * returns non-zero error code in case of any failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) static int pmcraid_probe(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) const struct pci_device_id *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) struct pmcraid_instance *pinstance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) struct Scsi_Host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) void __iomem *mapped_pci_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) int rc = PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) pmcraid_err
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) ("maximum number(%d) of supported adapters reached\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) atomic_read(&pmcraid_adapter_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) atomic_inc(&pmcraid_adapter_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) dev_err(&pdev->dev, "Cannot enable adapter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) atomic_dec(&pmcraid_adapter_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) dev_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) "Found new IOA(%x:%x), Total IOA count: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) pdev->vendor, pdev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) atomic_read(&pmcraid_adapter_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) "Couldn't register memory range of registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679) goto out_disable_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) mapped_pci_addr = pci_iomap(pdev, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) if (!mapped_pci_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) goto out_release_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) /* Firmware requires the system bus address of the IOARCB to be within
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) * the 32-bit addressable range even though the IOARRIN register is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694) * 64 bits wide. Streaming DMA buffers may therefore use 64-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) * addresses, but coherent buffers must stay below 4GB; the 32-bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) * coherent mask set below ensures dma_alloc_coherent() returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) * memory within the firmware's acceptable address range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) if (sizeof(dma_addr_t) == 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700) dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703) /* firmware expects 32-bit DMA addresses for the IOARRIN register; set a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) * 32-bit coherent mask so dma_alloc_coherent() returns addresses within 4GB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707) rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) goto cleanup_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) host = scsi_host_alloc(&pmcraid_host_template,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) sizeof(struct pmcraid_instance));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) goto cleanup_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) host->unique_id = host->host_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) /* zero out entire instance structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) pinstance = (struct pmcraid_instance *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731) memset(pinstance, 0, sizeof(*pinstance));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) pinstance->chip_cfg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) (struct pmcraid_chip_details *)(dev_id->driver_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) dev_err(&pdev->dev, "failed to initialize adapter instance\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) goto out_scsi_host_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) pci_set_drvdata(pdev, pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) /* Save PCI config-space for use following the reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746) rc = pci_save_state(pinstance->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) dev_err(&pdev->dev, "Failed to save PCI config space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) goto out_scsi_host_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752)
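/* Keep all adapter interrupts masked until the ISR, tasklets and
 * command buffers are ready; they are re-enabled just before the IOA
 * bring-up below.
 */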
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) pmcraid_disable_interrupts(pinstance, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755) rc = pmcraid_register_interrupt_handler(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) dev_err(&pdev->dev, "couldn't register interrupt handler\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) goto out_scsi_host_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) pmcraid_init_tasklets(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764) /* allocate various buffers used by the LLD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) rc = pmcraid_init_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) pmcraid_err("couldn't allocate memory blocks\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) goto out_unregister_isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) /* check the reset type required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) pmcraid_reset_type(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) /* Start IOA firmware initialization and bring card to Operational
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) * state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) pmcraid_info("starting IOA initialization sequence\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) if (pmcraid_reset_bringup(pinstance)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) dev_err(&pdev->dev, "couldn't initialize IOA\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784) goto out_release_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787) /* Add adapter instance into mid-layer list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) rc = scsi_add_host(pinstance->host, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) goto out_release_bufs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) scsi_scan_host(pinstance->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) rc = pmcraid_setup_chrdev(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799) pmcraid_err("couldn't create mgmt interface, error: %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801) goto out_remove_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804) /* Schedule worker thread to handle CCN and take care of adding and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) * removing devices to OS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) atomic_set(&pinstance->expose_resources, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808) schedule_work(&pinstance->worker_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811) out_remove_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) scsi_remove_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) out_release_bufs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) pmcraid_release_buffers(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) out_unregister_isr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) pmcraid_kill_tasklets(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819) pmcraid_unregister_interrupt_handler(pinstance);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) out_scsi_host_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) cleanup_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) iounmap(mapped_pci_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) out_release_regions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) out_disable_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) atomic_dec(&pmcraid_adapter_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) * PCI driver structure of pmcraid driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) static struct pci_driver pmcraid_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) .name = PMCRAID_DRIVER_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) .id_table = pmcraid_pci_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842) .probe = pmcraid_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) .remove = pmcraid_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) .suspend = pmcraid_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) .resume = pmcraid_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) .shutdown = pmcraid_shutdown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) * pmcraid_init - module load entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) static int __init pmcraid_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) dev_t dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) int error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857) pmcraid_info("%s Device Driver version: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859)
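/* Reserve the character device region and create the sysfs class up
 * front; pmcraid_probe() presumably relies on these when it creates
 * the per-adapter management chardev via pmcraid_setup_chrdev().
 */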
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) error = alloc_chrdev_region(&dev, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861) PMCRAID_MAX_ADAPTERS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) PMCRAID_DEVFILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865) pmcraid_err("failed to get a major number for adapters\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) goto out_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) pmcraid_major = MAJOR(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) if (IS_ERR(pmcraid_class)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) error = PTR_ERR(pmcraid_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874) pmcraid_err("failed to register with sysfs, error = %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) goto out_unreg_chrdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) error = pmcraid_netlink_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) class_destroy(pmcraid_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) goto out_unreg_chrdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) error = pci_register_driver(&pmcraid_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) if (error == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) goto out_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891) pmcraid_err("failed to register pmcraid driver, error = %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) class_destroy(pmcraid_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) pmcraid_netlink_release();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) out_unreg_chrdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) out_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) return error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) * pmcraid_exit - module unload entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) static void __exit pmcraid_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) pmcraid_netlink_release();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909) unregister_chrdev_region(MKDEV(pmcraid_major, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) PMCRAID_MAX_ADAPTERS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) pci_unregister_driver(&pmcraid_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) class_destroy(pmcraid_class);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) module_init(pmcraid_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) module_exit(pmcraid_exit);