/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 * Copyright 2016 Microsemi Corporation
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more details.
 *
 * Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/scsi_dbg.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/*
 * HPSA_DRIVER_VERSION must be three byte values (0-255) separated by '.',
 * with an optional trailing '-' followed by a byte value (0-255).
 */
#define HPSA_DRIVER_VERSION "3.4.20-200"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait for CISS doorbell communication */
#define CLEAR_EVENT_WAIT_INTERVAL 20	/* ms for each msleep() call */
#define MODE_CHANGE_WAIT_INTERVAL 10	/* ms for each msleep() call */
#define MAX_CLEAR_EVENT_WAIT 30000	/* times 20 ms = 600 s */
#define MAX_MODE_CHANGE_WAIT 2000	/* times 10 ms = 20 s */
#define MAX_IOCTL_CONFIG_WAIT 1000
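
/*
 * A minimal sketch of how the interval/limit pairs above bound the
 * doorbell polling loops (the real loops live in helpers such as
 * hpsa_wait_for_mode_change_ack(); doorbell_acked() is illustrative,
 * not a function in this driver):
 *
 *	for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
 *		if (doorbell_acked(h))
 *			break;
 *		msleep(MODE_CHANGE_WAIT_INTERVAL);
 *	}
 */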

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
/* How long to wait before giving up on a command */
#define HPSA_EH_PTRAID_TIMEOUT (240 * HZ)

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("cciss");

static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");
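/*
 * hpsa_simple_mode is read at driver load time, e.g.
 * "modprobe hpsa hpsa_simple_mode=1", or "hpsa.hpsa_simple_mode=1" on
 * the kernel command line; because the parameter is marked S_IWUSR it
 * is also exposed writable at
 * /sys/module/hpsa/parameters/hpsa_simple_mode.
 */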

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1920},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103c, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
	{PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
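
/*
 * Each explicit entry above matches {vendor, device, subvendor,
 * subdevice}.  The two trailing PCI_ANY_ID entries are class-based
 * catch-alls: class PCI_CLASS_STORAGE_RAID << 8 with mask 0xffff << 8
 * matches any other HP or Compaq board whose base class and subclass
 * report "RAID controller" (the prog-if byte is ignored), so such
 * boards bind without a table edit.
 */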

/* board_id = Subsystem Device ID & Vendor ID
 * product = Marketing Name for the board
 * access = Address of the struct of function pointers
 */
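/*
 * For example, PCI subsystem vendor 0x103C (Hewlett-Packard) with
 * subsystem device 0x3241 combines into board_id 0x3241103C, the
 * "Smart Array P212" entry below.
 */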
static struct board_type products[] = {
	{0x40700E11, "Smart Array 5300", &SA5A_access},
	{0x40800E11, "Smart Array 5i", &SA5B_access},
	{0x40820E11, "Smart Array 532", &SA5B_access},
	{0x40830E11, "Smart Array 5312", &SA5B_access},
	{0x409A0E11, "Smart Array 641", &SA5A_access},
	{0x409B0E11, "Smart Array 642", &SA5A_access},
	{0x409C0E11, "Smart Array 6400", &SA5A_access},
	{0x409D0E11, "Smart Array 6400 EM", &SA5A_access},
	{0x40910E11, "Smart Array 6i", &SA5A_access},
	{0x3225103C, "Smart Array P600", &SA5A_access},
	{0x3223103C, "Smart Array P800", &SA5A_access},
	{0x3234103C, "Smart Array P400", &SA5A_access},
	{0x3235103C, "Smart Array P400i", &SA5A_access},
	{0x3211103C, "Smart Array E200i", &SA5A_access},
	{0x3212103C, "Smart Array E200", &SA5A_access},
	{0x3213103C, "Smart Array E200i", &SA5A_access},
	{0x3214103C, "Smart Array E200i", &SA5A_access},
	{0x3215103C, "Smart Array E200i", &SA5A_access},
	{0x3237103C, "Smart Array E500", &SA5A_access},
	{0x323D103C, "Smart Array P700m", &SA5A_access},
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1920103C, "Smart Array P430i", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1925103C, "Smart Array P831", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array P244br", &SA5_access},
	{0x21BE103C, "Smart Array P741m", &SA5_access},
	{0x21BF103C, "Smart HBA H240ar", &SA5_access},
	{0x21C0103C, "Smart Array P440ar", &SA5_access},
	{0x21C1103C, "Smart Array P840ar", &SA5_access},
	{0x21C2103C, "Smart Array P440", &SA5_access},
	{0x21C3103C, "Smart Array P441", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array P841", &SA5_access},
	{0x21C6103C, "Smart HBA H244br", &SA5_access},
	{0x21C7103C, "Smart HBA H240", &SA5_access},
	{0x21C8103C, "Smart HBA H241", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array P246br", &SA5_access},
	{0x21CB103C, "Smart Array P840", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart HBA", &SA5_access},
	{0x05809005, "SmartHBA-SA", &SA5_access},
	{0x05819005, "SmartHBA-SA 8i", &SA5_access},
	{0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
	{0x05839005, "SmartHBA-SA 8e", &SA5_access},
	{0x05849005, "SmartHBA-SA 16i", &SA5_access},
	{0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static struct scsi_transport_template *hpsa_sas_transport_template;
static int hpsa_add_sas_host(struct ctlr_info *h);
static void hpsa_delete_sas_host(struct ctlr_info *h);
static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
			struct hpsa_scsi_dev_t *device);
static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device);
static struct hpsa_scsi_dev_t
	*hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
		struct sas_rphy *rphy);

#define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
static const struct scsi_cmnd hpsa_cmd_busy;
#define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
static const struct scsi_cmnd hpsa_cmd_idle;
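
/*
 * hpsa_cmd_busy and hpsa_cmd_idle are never issued as real commands;
 * their addresses serve as sentinel values for CommandList->scsi_cmd,
 * so a command's state can be tested with a plain pointer comparison
 * (see hpsa_is_cmd_idle() below).
 */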
static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
		      void __user *arg);
static int hpsa_passthru_ioctl(struct ctlr_info *h,
			       IOCTL_Command_struct *iocommand);
static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
				   BIG_IOCTL_Command_struct *ioc);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)
#define HPSA_SIMPLE_ERROR_BITS 0x03

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static int hpsa_slave_configure(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_free_performant_mode(struct ctlr_info *h);
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
	bool *legacy_board);
static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[],
	int reply_queue);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
static u32 lockup_detected(struct ctlr_info *h);
static int detect_controller_lockup(struct ctlr_info *h);
static void hpsa_disable_rld_caching(struct ctlr_info *h);
static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
	struct ReportExtendedLUNdata *buf, int bufsize);
static bool hpsa_vpd_page_supported(struct ctlr_info *h,
	unsigned char scsi3addr[], u8 page);
static int hpsa_luns_changed(struct ctlr_info *h);
static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
			       struct hpsa_scsi_dev_t *dev,
			       unsigned char *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}
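
/*
 * Both helpers above depend on the same convention: the Scsi_Host
 * private area returned by shost_priv() holds a single unsigned long
 * whose value is the pointer back to the owning ctlr_info.
 */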

static inline bool hpsa_is_cmd_idle(struct CommandList *c)
{
	return c->scsi_cmd == SCSI_CMD_IDLE;
}

/* extract sense key, asc, and ascq from sense data. -1 means invalid. */
static void decode_sense_data(const u8 *sense_data, int sense_data_len,
			u8 *sense_key, u8 *asc, u8 *ascq)
{
	struct scsi_sense_hdr sshdr;
	bool rc;

	*sense_key = -1;
	*asc = -1;
	*ascq = -1;

	if (sense_data_len < 1)
		return;

	rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
	if (rc) {
		*sense_key = sshdr.sense_key;
		*asc = sshdr.asc;
		*ascq = sshdr.ascq;
	}
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	u8 sense_key, asc, ascq;
	int sense_len;

	if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
		sense_len = sizeof(c->err_info->SenseInfo);
	else
		sense_len = c->err_info->SenseLen;

	decode_sense_data(c->err_info->SenseInfo, sense_len,
				&sense_key, &asc, &ascq);
	if (sense_key != UNIT_ATTENTION || asc == 0xff)
		return 0;

	switch (asc) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: a state change detected, command retried\n",
			h->devname);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			"%s: LUN failure detected\n", h->devname);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			"%s: report LUN data changed\n", h->devname);
		/*
		 * Note: this REPORT_LUNS_CHANGED condition only occurs on the
		 * external target (array) devices.
		 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev,
			"%s: a power on or device reset detected\n",
			h->devname);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev,
			"%s: unit attention cleared by another initiator\n",
			h->devname);
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: unknown unit attention detected\n",
			h->devname);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA ": device busy\n");
	return 1;
}

static u32 lockup_detected(struct ctlr_info *h);
static ssize_t host_show_lockup_detected(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ld;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	ld = lockup_detected(h);

	return sprintf(buf, "ld=%d\n", ld);
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}
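
/*
 * The two store hooks above are reached through sysfs host attributes;
 * the exact file names come from the DEVICE_ATTR() definitions later in
 * this file, but the usage pattern looks like (illustrative path):
 *
 *	echo 1 > /sys/class/scsi_host/host0/hp_ssd_smart_path_status
 */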

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
{
	device->offload_enabled = 0;
	device->offload_to_be_enabled = 0;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ? "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards. These are two pci devices in one slot
	 * which share a battery backed cache module. One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it. If we reset the one controlling the cache, the other will
	 * likely not be happy. Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int board_id_in_array(u32 a[], int nelems, u32 board_id)
{
	int i;

	for (i = 0; i < nelems; i++)
		if (a[i] == board_id)
			return 1;
	return 0;
}

static int ctlr_is_hard_resettable(u32 board_id)
{
	return !board_id_in_array(unresettable_controller,
			ARRAY_SIZE(unresettable_controller), board_id);
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	return !board_id_in_array(soft_unresettable_controller,
			ARRAY_SIZE(soft_unresettable_controller), board_id);
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
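
/*
 * In CISS LUN addressing the two high-order bits of scsi3addr[3] encode
 * the addressing mode; the test above checks for mode 01b (0x40), the
 * logical-device mode used for RAID volumes.
 */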

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN", "PHYS DRV"
};
#define HPSA_RAID_0 0
#define HPSA_RAID_4 1
#define HPSA_RAID_1 2	/* also used for RAID 10 */
#define HPSA_RAID_5 3	/* also used for RAID 50 */
#define HPSA_RAID_51 4
#define HPSA_RAID_6 5	/* also used for RAID 60 */
#define HPSA_RAID_ADM 6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 2)
#define PHYSICAL_DRIVE (ARRAY_SIZE(raid_label) - 1)

static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
{
	return !device->physical_device;
}

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_device(hdev)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) return snprintf(buf, 20, "0x%8phN\n", lunid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) static ssize_t unique_id_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) struct hpsa_scsi_dev_t *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) unsigned char sn[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) h = sdev_to_hba(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) hdev = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) memcpy(sn, hdev->device_id, sizeof(sn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) return snprintf(buf, 16 * 2 + 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) "%02X%02X%02X%02X%02X%02X%02X%02X"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) "%02X%02X%02X%02X%02X%02X%02X%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) sn[0], sn[1], sn[2], sn[3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) sn[4], sn[5], sn[6], sn[7],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) sn[8], sn[9], sn[10], sn[11],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) sn[12], sn[13], sn[14], sn[15]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) static ssize_t sas_address_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) struct hpsa_scsi_dev_t *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) u64 sas_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) h = sdev_to_hba(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) hdev = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) if (!hdev || is_logical_device(hdev) || !hdev->expose_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) sas_address = hdev->sas_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) return snprintf(buf, PAGE_SIZE, "0x%016llx\n", sas_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) struct hpsa_scsi_dev_t *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) int offload_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) h = sdev_to_hba(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) hdev = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) offload_enabled = hdev->offload_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) if (hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) return snprintf(buf, 20, "%d\n", offload_enabled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) return snprintf(buf, 40, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) "Not applicable for a controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) #define MAX_PATHS 8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) static ssize_t path_info_show(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) struct hpsa_scsi_dev_t *hdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) int output_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) u8 box;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) u8 bay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) u8 path_map_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) char *active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) unsigned char phys_connector[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) h = sdev_to_hba(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) spin_lock_irqsave(&h->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) hdev = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) if (!hdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) spin_unlock_irqrestore(&h->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) bay = hdev->bay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) for (i = 0; i < MAX_PATHS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) path_map_index = 1<<i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) if (i == hdev->active_path_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) active = "Active";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) else if (hdev->path_map & path_map_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) active = "Inactive";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) output_len += scnprintf(buf + output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) PAGE_SIZE - output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) "[%d:%d:%d:%d] %20.20s ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) hdev->bus, hdev->target, hdev->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) scsi_device_type(hdev->devtype));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) if (hdev->devtype == TYPE_RAID || is_logical_device(hdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) output_len += scnprintf(buf + output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) PAGE_SIZE - output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) "%s\n", active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) box = hdev->box[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) memcpy(&phys_connector, &hdev->phys_connector[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) sizeof(phys_connector));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) if (phys_connector[0] < '0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) phys_connector[0] = '0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) if (phys_connector[1] < '0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) phys_connector[1] = '0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) output_len += scnprintf(buf + output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) PAGE_SIZE - output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) "PORT: %.2s ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) phys_connector);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) if ((hdev->devtype == TYPE_DISK || hdev->devtype == TYPE_ZBC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) hdev->expose_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (box == 0 || box == 0xFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) output_len += scnprintf(buf + output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) PAGE_SIZE - output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) "BAY: %hhu %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) bay, active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) output_len += scnprintf(buf + output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) PAGE_SIZE - output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) "BOX: %hhu BAY: %hhu %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) box, bay, active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) } else if (box != 0 && box != 0xFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) output_len += scnprintf(buf + output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) PAGE_SIZE - output_len, "BOX: %hhu %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) box, active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) output_len += scnprintf(buf + output_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) PAGE_SIZE - output_len, "%s\n", active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) spin_unlock_irqrestore(&h->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) return output_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) }
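
/*
 * Editorial illustration, not output captured from real hardware: for an
 * exposed physical disk, path_info_show() emits one line per discovered
 * path, along the lines of
 *
 *	[2:0:3:0]        Direct-Access PORT: 1I BOX: 1 BAY: 9 Active
 *
 * whereas controller and logical devices collapse to just
 * "[h:b:t:l] <type> Active" (or "Inactive").
 */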
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) static ssize_t host_show_ctlr_num(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) h = shost_to_hba(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) return snprintf(buf, 20, "%d\n", h->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) static ssize_t host_show_legacy_board(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) h = shost_to_hba(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) return snprintf(buf, 20, "%d\n", h->legacy_board ? 1 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static DEVICE_ATTR_RO(raid_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) static DEVICE_ATTR_RO(lunid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) static DEVICE_ATTR_RO(unique_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static DEVICE_ATTR_RO(sas_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) host_show_hp_ssd_smart_path_enabled, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static DEVICE_ATTR_RO(path_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) host_show_hp_ssd_smart_path_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) host_store_hp_ssd_smart_path_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) host_store_raid_offload_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) static DEVICE_ATTR(firmware_revision, S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) host_show_firmware_revision, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) static DEVICE_ATTR(commands_outstanding, S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) host_show_commands_outstanding, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) static DEVICE_ATTR(transport_mode, S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) host_show_transport_mode, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) static DEVICE_ATTR(resettable, S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) host_show_resettable, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) static DEVICE_ATTR(lockup_detected, S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) host_show_lockup_detected, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) static DEVICE_ATTR(ctlr_num, S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) host_show_ctlr_num, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) static DEVICE_ATTR(legacy_board, S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) host_show_legacy_board, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) static struct device_attribute *hpsa_sdev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) &dev_attr_raid_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) &dev_attr_lunid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) &dev_attr_unique_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) &dev_attr_hp_ssd_smart_path_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) &dev_attr_path_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) &dev_attr_sas_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static struct device_attribute *hpsa_shost_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) &dev_attr_rescan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) &dev_attr_firmware_revision,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) &dev_attr_commands_outstanding,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) &dev_attr_transport_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) &dev_attr_resettable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) &dev_attr_hp_ssd_smart_path_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) &dev_attr_raid_offload_debug,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) &dev_attr_lockup_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) &dev_attr_ctlr_num,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) &dev_attr_legacy_board,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_DRIVER +\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) HPSA_MAX_CONCURRENT_PASSTHRUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static struct scsi_host_template hpsa_driver_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) .module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) .name = HPSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) .proc_name = HPSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) .queuecommand = hpsa_scsi_queue_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) .scan_start = hpsa_scan_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) .scan_finished = hpsa_scan_finished,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) .change_queue_depth = hpsa_change_queue_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) .this_id = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) .eh_device_reset_handler = hpsa_eh_device_reset_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) .ioctl = hpsa_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) .slave_alloc = hpsa_slave_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) .slave_configure = hpsa_slave_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) .slave_destroy = hpsa_slave_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) .compat_ioctl = hpsa_compat_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) .sdev_attrs = hpsa_sdev_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) .shost_attrs = hpsa_shost_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) .max_sectors = 2048,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) .no_write_same = 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) static inline u32 next_command(struct ctlr_info *h, u8 q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) u32 a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) struct reply_queue_buffer *rq = &h->reply_queue[q];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) if (h->transMethod & CFGTBL_Trans_io_accel1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) return h->access.command_completed(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return h->access.command_completed(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) a = rq->head[rq->current_entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) rq->current_entry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) atomic_dec(&h->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) a = FIFO_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) /* Check for wraparound */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) if (rq->current_entry == h->max_commands) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) rq->current_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) rq->wraparound ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) return a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) }
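
/*
 * Editorial sketch of the toggle-bit ring consumed above (not driver
 * code): the controller posts each tag with its low bit set to the
 * current pass's parity, so an entry is fresh only while (tag & 1)
 * matches rq->wraparound. Once the consumer index wraps past
 * max_commands it flips rq->wraparound, and stale entries left over
 * from the previous pass (opposite low bit) read back as FIFO_EMPTY
 * instead of being consumed twice.
 */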
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) * There are some special bits in the bus address of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) * command that we have to set for the controller to know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) * how to process the command:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) * Normal performant mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) * bit 0: 1 means performant mode, 0 means simple mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) * bits 1-3 = block fetch table entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) * bits 4-6 = command type (== 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * ioaccel1 mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * bit 0 = "performant mode" bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) * bits 1-3 = block fetch table entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * bits 4-6 = command type (== 110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) * (command type is needed because ioaccel1 mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * commands are submitted through the same register as normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * mode commands, so this is how the controller knows whether
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) * the command is normal mode or ioaccel1 mode.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * ioaccel2 mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) * bit 0 = "performant mode" bit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) * bits 1-4 = block fetch table entry (note extra bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) * bits 4-6 = not needed, because ioaccel2 mode has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * a separate special register for submitting commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */
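
/*
 * Hedged worked example (illustrative values, not from the driver): a
 * normal performant-mode command whose SG count maps to block fetch
 * table entry 2 gets its tag composed by set_performant_mode() below as
 *
 *	c->busaddr |= 1 | (2 << 1);	   => low bits 0b00101
 *
 * i.e. the performant-mode bit plus the fetch table entry, with bits
 * 4-6 left zero to mark it a normal-mode (not ioaccel1) command.
 */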
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * set_performant_mode: Modify the tag for cciss performant mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * set bit 0 for the pull model, bits 3-1 for the block fetch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * table entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) #define DEFAULT_REPLY_QUEUE (-1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (unlikely(!h->msix_vectors))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) c->Header.ReplyQueue = reply_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) static void set_ioaccel1_performant_mode(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * Tell the controller to post the reply to the queue for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * processor. This seems to give the best I/O throughput.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) cp->ReplyQueue = reply_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) * Set the bits in the address sent down to include:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) * - performant mode bit (bit 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) * - pull count (bits 1-3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) * - command type (bits 4-6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) IOACCEL1_BUSADDR_CMDTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) &h->ioaccel2_cmd_pool[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) /* Tell the controller to post the reply to the queue for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) * processor. This seems to give the best I/O throughput.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) cp->reply_queue = reply_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) /* Set the bits in the address sent down to include:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * - performant mode bit not used in ioaccel mode 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * - pull count (bits 0-3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * - command type isn't needed for ioaccel2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) c->busaddr |= h->ioaccel2_blockFetchTable[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static void set_ioaccel2_performant_mode(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * Tell the controller to post the reply to the queue for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * processor. This seems to give the best I/O throughput.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) cp->reply_queue = reply_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * Set the bits in the address sent down to include:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) * - performant mode bit not used in ioaccel mode 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * - pull count (bits 0-3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) * - command type isn't needed for ioaccel2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static int is_firmware_flash_cmd(u8 *cdb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) }
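
/*
 * Minimal usage sketch (hypothetical buffer, using the BMIC_* opcode
 * macros referenced above): a CDB matches the predicate when byte 0 is
 * BMIC_WRITE and byte 6 is BMIC_FLASH_FIRMWARE, e.g.
 *
 *	u8 cdb[16] = { [0] = BMIC_WRITE, [6] = BMIC_FLASH_FIRMWARE };
 *
 * for which is_firmware_flash_cmd(cdb) returns nonzero.
 */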
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) * During firmware flash, the heartbeat register may not update as frequently
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * as it should. So we dial down lockup detection during firmware flash, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * dial it back up when firmware flash completes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) #define HPSA_EVENT_MONITOR_INTERVAL (15 * HZ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (!is_firmware_flash_cmd(c->Request.CDB))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) atomic_inc(&h->firmware_flash_in_progress);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (is_firmware_flash_cmd(c->Request.CDB) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) atomic_dec_and_test(&h->firmware_flash_in_progress))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
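
/*
 * Editorial note: firmware_flash_in_progress is a counter rather than a
 * flag, so overlapping flash commands nest correctly; the normal sample
 * interval is restored only when atomic_dec_and_test() observes the
 * last outstanding flash command completing.
 */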
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) struct CommandList *c, int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) dial_down_lockup_detection_during_fw_flash(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) atomic_inc(&h->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (c->device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) atomic_inc(&c->device->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) reply_queue = h->reply_map[raw_smp_processor_id()];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) switch (c->cmd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) case CMD_IOACCEL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) set_ioaccel1_performant_mode(h, c, reply_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) case CMD_IOACCEL2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) set_ioaccel2_performant_mode(h, c, reply_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) case IOACCEL2_TMF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) set_performant_mode(h, c, reply_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) h->access.submit_command(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static inline int is_hba_lunid(unsigned char scsi3addr[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
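
/*
 * Editorial note, assuming the RAID_CTLR_LUNID definition from the
 * driver headers: is_hba_lunid() simply checks for the reserved 8-byte
 * address that targets the controller itself rather than any disk or
 * logical volume.
 */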
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static inline int is_scsi_rev_5(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (!h->hba_inquiry_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if ((h->hba_inquiry_data[2] & 0x07) == 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) static int hpsa_find_target_lun(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) unsigned char scsi3addr[], int bus, int *target, int *lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /* finds an unused bus, target, lun for a new physical device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) * assumes h->devlock is held
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int i, found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) for (i = 0; i < h->ndevices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) __set_bit(h->dev[i]->target, lun_taken);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) if (i < HPSA_MAX_DEVICES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) /* *bus = 1; */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) *target = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) *lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) return !found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) }
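
/*
 * Minimal usage sketch (hypothetical caller, devlock held); note the
 * inverted return convention of hpsa_find_target_lun() - 0 means a
 * free slot was found:
 *
 *	int target, lun;
 *
 *	if (!hpsa_find_target_lun(h, dev->scsi3addr, dev->bus,
 *				  &target, &lun)) {
 *		dev->target = target;	   first unused target on the bus
 *		dev->lun = lun;		   always 0 for a fresh device
 *	}
 */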
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) struct hpsa_scsi_dev_t *dev, char *description)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) #define LABEL_SIZE 25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) char label[LABEL_SIZE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) switch (dev->devtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) case TYPE_RAID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) snprintf(label, LABEL_SIZE, "controller");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) case TYPE_ENCLOSURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) snprintf(label, LABEL_SIZE, "enclosure");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) case TYPE_DISK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) case TYPE_ZBC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) if (dev->external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) snprintf(label, LABEL_SIZE, "external");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) else if (!is_logical_dev_addr_mode(dev->scsi3addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) snprintf(label, LABEL_SIZE, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) raid_label[PHYSICAL_DRIVE]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) snprintf(label, LABEL_SIZE, "RAID-%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) dev->raid_level > RAID_UNKNOWN ? "?" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) raid_label[dev->raid_level]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) case TYPE_ROM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) snprintf(label, LABEL_SIZE, "rom");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) case TYPE_TAPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) snprintf(label, LABEL_SIZE, "tape");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) case TYPE_MEDIUM_CHANGER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) snprintf(label, LABEL_SIZE, "changer");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) snprintf(label, LABEL_SIZE, "UNKNOWN");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) dev_printk(level, &h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) "scsi %d:%d:%d:%d: %s %s %.8s %.16s %s SSDSmartPathCap%c En%c Exp=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) description,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) scsi_device_type(dev->devtype),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) dev->vendor,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) dev->model,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) label,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) dev->offload_config ? '+' : '-',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) dev->offload_to_be_enabled ? '+' : '-',
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) dev->expose_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) /* Add an entry into h->dev[] array. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) static int hpsa_scsi_add_entry(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) struct hpsa_scsi_dev_t *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) struct hpsa_scsi_dev_t *added[], int *nadded)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) /* assumes h->devlock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) int n = h->ndevices;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) unsigned char addr1[8], addr2[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) struct hpsa_scsi_dev_t *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) if (n >= HPSA_MAX_DEVICES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) "too many devices, some will be inaccessible.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) /* physical devices do not have lun or target assigned until now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) if (device->lun != -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) /* Logical device, lun is already assigned. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) goto lun_assigned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) /* If this device is a non-zero lun of a multi-lun device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) * byte 4 of the 8-byte LUN addr will contain the logical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) * unit number; it is zero otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) if (device->scsi3addr[4] == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* This is not a non-zero lun of a multi-lun device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) if (hpsa_find_target_lun(h, device->scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) device->bus, &device->target, &device->lun) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) goto lun_assigned;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) /* This is a non-zero lun of a multi-lun device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * Search through our list and find the device which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) * has the same 8-byte LUN address, excepting bytes 4 and 5.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) * Assign the same bus and target for this new LUN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) * Use the logical unit number from the firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) memcpy(addr1, device->scsi3addr, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) addr1[4] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) addr1[5] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) for (i = 0; i < n; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) sd = h->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) memcpy(addr2, sd->scsi3addr, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) addr2[4] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) addr2[5] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) /* differ only in bytes 4 and 5? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (memcmp(addr1, addr2, 8) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) device->bus = sd->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) device->target = sd->target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) device->lun = device->scsi3addr[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) if (device->lun == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) "physical device with no LUN=0, suspect firmware bug or unsupported hardware configuration.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) lun_assigned:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) h->dev[n] = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) h->ndevices++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) added[*nadded] = device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) (*nadded)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) hpsa_show_dev_msg(KERN_INFO, h, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) device->expose_device ? "added" : "masked");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) }
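
/*
 * Worked example for the byte-4/5 masking above (made-up addresses):
 *
 *	addr A = { 0x01, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }
 *	addr B = { 0x01, 0x40, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 }
 *
 * B differs from A only in byte 4, so B is treated as another lun of
 * the same multi-lun device: it inherits A's bus and target, and takes
 * lun 2 from its own byte 4.
 */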
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) * Called during a scan operation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) * Update an entry in h->dev[] array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) static void hpsa_scsi_update_entry(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) int entry, struct hpsa_scsi_dev_t *new_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* assumes h->devlock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) /* Raid level changed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) h->dev[entry]->raid_level = new_entry->raid_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) * ioaccel_handle may have changed for a dual domain disk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) /* Raid offload parameters changed. Careful about the ordering. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) if (new_entry->offload_config && new_entry->offload_to_be_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) * if drive is newly offload_enabled, we want to copy the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) * raid map data first. If previously offload_enabled and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) * offload_config were set, raid map data had better be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) * the same as it was before. If raid map data has changed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) * then it had better be the case that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) * h->dev[entry]->offload_enabled is currently 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) h->dev[entry]->raid_map = new_entry->raid_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) if (new_entry->offload_to_be_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) h->dev[entry]->offload_config = new_entry->offload_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) h->dev[entry]->queue_depth = new_entry->queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) * We can turn off ioaccel offload now, but need to delay turning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) * ioaccel on until we can update h->dev[entry]->phys_disk[], and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * can't do that until all the devices are updated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) h->dev[entry]->offload_to_be_enabled = new_entry->offload_to_be_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) * turn ioaccel off immediately if told to do so.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (!new_entry->offload_to_be_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) h->dev[entry]->offload_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) /* Replace an entry from h->dev[] array. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) static void hpsa_scsi_replace_entry(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) int entry, struct hpsa_scsi_dev_t *new_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) struct hpsa_scsi_dev_t *added[], int *nadded,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) struct hpsa_scsi_dev_t *removed[], int *nremoved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) /* assumes h->devlock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) removed[*nremoved] = h->dev[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) (*nremoved)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * New physical devices won't have target/lun assigned yet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * so we need to preserve the values in the slot we are replacing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) if (new_entry->target == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) new_entry->target = h->dev[entry]->target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) new_entry->lun = h->dev[entry]->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) h->dev[entry] = new_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) added[*nadded] = new_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) (*nadded)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) /* Remove an entry from h->dev[] array. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) struct hpsa_scsi_dev_t *removed[], int *nremoved)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) /* assumes h->devlock is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) struct hpsa_scsi_dev_t *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) sd = h->dev[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) removed[*nremoved] = h->dev[entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) (*nremoved)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) for (i = entry; i < h->ndevices-1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) h->dev[i] = h->dev[i+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) h->ndevices--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) #define SCSI3ADDR_EQ(a, b) ( \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) (a)[7] == (b)[7] && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) (a)[6] == (b)[6] && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) (a)[5] == (b)[5] && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) (a)[4] == (b)[4] && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) (a)[3] == (b)[3] && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) (a)[2] == (b)[2] && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) (a)[1] == (b)[1] && \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) (a)[0] == (b)[0])
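
/*
 * Editorial note: SCSI3ADDR_EQ(a, b) is equivalent to
 * memcmp(a, b, 8) == 0; the open-coded byte-by-byte form presumably
 * just avoids the function call on this frequently used comparison.
 */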
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) static void fixup_botched_add(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) struct hpsa_scsi_dev_t *added)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /* called when scsi_add_device fails in order to re-adjust
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * h->dev[] to match the mid layer's view.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) for (i = 0; i < h->ndevices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) if (h->dev[i] == added) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) for (j = i; j < h->ndevices-1; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) h->dev[j] = h->dev[j+1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) h->ndevices--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) kfree(added);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) struct hpsa_scsi_dev_t *dev2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) /* We compare everything except lun and target, as these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) * are not yet assigned. Compare the fields most likely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) * to differ first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) sizeof(dev1->scsi3addr)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) if (memcmp(dev1->device_id, dev2->device_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) sizeof(dev1->device_id)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) if (dev1->devtype != dev2->devtype)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) if (dev1->bus != dev2->bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) struct hpsa_scsi_dev_t *dev2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /* Device attributes that can change, but don't mean
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * that the device is a different device, nor that the OS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * needs to be told anything about the change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) if (dev1->raid_level != dev2->raid_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) if (dev1->offload_config != dev2->offload_config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) if (dev1->offload_to_be_enabled != dev2->offload_to_be_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) if (!is_logical_dev_addr_mode(dev1->scsi3addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) if (dev1->queue_depth != dev2->queue_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * This can happen for dual-domain devices. An active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * path change causes the ioaccel handle to change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) * For example, note the handle differences between p0 and p1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * Device WWN ,WWN hash,Handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) * D016 p0|0x3 [02]P2E:01:01,0x5000C5005FC4DACA,0x9B5616,0x01030003
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) * p1 0x5000C5005FC4DAC9,0x6798C0,0x00040004
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) if (dev1->ioaccel_handle != dev2->ioaccel_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) /* Find needle in haystack. If exact match found, return DEVICE_SAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) * and return needle location in *index. If scsi3addr matches, but not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) * location in *index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) * In the case of a minor device attribute change, such as RAID level, just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) * return DEVICE_UPDATED, along with the updated device's location in index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) * If needle not found, return DEVICE_NOT_FOUND.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) struct hpsa_scsi_dev_t *haystack[], int haystack_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) int *index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) #define DEVICE_NOT_FOUND 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) #define DEVICE_CHANGED 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) #define DEVICE_SAME 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) #define DEVICE_UPDATED 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) if (needle == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) return DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) for (i = 0; i < haystack_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) if (haystack[i] == NULL) /* previously removed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) *index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) if (device_is_the_same(needle, haystack[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (device_updated(needle, haystack[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return DEVICE_UPDATED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) return DEVICE_SAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) /* Keep offline devices offline */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) if (needle->volume_offline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) return DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) return DEVICE_CHANGED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) *index = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) return DEVICE_NOT_FOUND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) }
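/*
 * A minimal usage sketch (the real consumer is adjust_hpsa_scsi_table()
 * below); callers dispatch on the return value and use *index only when
 * the device was found:
 *
 *	switch (hpsa_scsi_find_entry(sd, h->dev, h->ndevices, &entry)) {
 *	case DEVICE_SAME:
 *	case DEVICE_UPDATED:
 *	case DEVICE_CHANGED:
 *		(entry indexes the matching device here)
 *		break;
 *	case DEVICE_NOT_FOUND:
 *		(do not rely on entry)
 *		break;
 *	}
 */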
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static void hpsa_monitor_offline_device(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) unsigned char scsi3addr[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) struct offline_device_entry *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) /* Check to see if device is already on the list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) spin_lock_irqsave(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) list_for_each_entry(device, &h->offline_device_list, offline_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (memcmp(device->scsi3addr, scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) sizeof(device->scsi3addr)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) spin_unlock_irqrestore(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) spin_unlock_irqrestore(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) /* Device is not on the list, add it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) device = kmalloc(sizeof(*device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) spin_lock_irqsave(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) list_add_tail(&device->offline_list, &h->offline_device_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) spin_unlock_irqrestore(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) }
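/*
 * Entries added to h->offline_device_list above are consumed by the
 * rescan/monitor logic elsewhere in this driver, which re-checks the
 * volume status and kicks off a rescan once an offline volume comes
 * ready.
 */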
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) /* Print a message explaining various offline volume states */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) static void hpsa_show_volume_status(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) struct hpsa_scsi_dev_t *sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) switch (sd->volume_offline) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) case HPSA_LV_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) case HPSA_LV_UNDERGOING_ERASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) case HPSA_LV_NOT_AVAILABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) "C%d:B%d:T%d:L%d Volume is waiting for transformation to complete.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) case HPSA_LV_UNDERGOING_RPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) case HPSA_LV_PENDING_RPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) case HPSA_LV_ENCRYPTED_NO_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) case HPSA_LV_UNDERGOING_ENCRYPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) case HPSA_LV_PENDING_ENCRYPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) sd->bus, sd->target, sd->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * Figure out the list of physical drive pointers for a logical drive with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * RAID offload configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) struct hpsa_scsi_dev_t *dev[], int ndevices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) struct hpsa_scsi_dev_t *logical_drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) struct raid_map_data *map = &logical_drive->raid_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) struct raid_map_disk_data *dd = &map->data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) le16_to_cpu(map->metadata_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) int nraid_map_entries = le16_to_cpu(map->row_cnt) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) le16_to_cpu(map->layout_map_count) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) total_disks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) int nphys_disk = le16_to_cpu(map->layout_map_count) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) total_disks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) int qdepth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) nraid_map_entries = RAID_MAP_MAX_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) logical_drive->nphysical_disks = nraid_map_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715)
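/*
 * For each RAID map entry, find the physical device whose
 * ioaccel_handle matches. The logical drive's queue depth is
 * approximated as the sum of its members' queue depths, capped at
 * h->nr_cmds.
 */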
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) qdepth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) for (i = 0; i < nraid_map_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) logical_drive->phys_disk[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) if (!logical_drive->offload_config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) for (j = 0; j < ndevices; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) if (dev[j] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (dev[j]->devtype != TYPE_DISK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) dev[j]->devtype != TYPE_ZBC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) if (is_logical_device(dev[j]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) logical_drive->phys_disk[i] = dev[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) if (i < nphys_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) qdepth = min(h->nr_cmds, qdepth +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) logical_drive->phys_disk[i]->queue_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * This can happen if a physical drive is removed and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * the logical drive is degraded. The RAID map data then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) * refers to a physical disk which isn't actually present.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * In that case offload_enabled should already be 0, but we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * turn it off here just in case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) if (!logical_drive->phys_disk[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) "%s: [%d:%d:%d:%d] A phys disk component of LV is missing, turning off offload_enabled for LV.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) h->scsi_host->host_no, logical_drive->bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) logical_drive->target, logical_drive->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) hpsa_turn_off_ioaccel_for_device(logical_drive);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) logical_drive->queue_depth = 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) if (nraid_map_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * This is correct for reads, too high for full stripe writes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * way too high for partial stripe writes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) logical_drive->queue_depth = qdepth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) if (logical_drive->external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) logical_drive->queue_depth = EXTERNAL_QD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) logical_drive->queue_depth = h->nr_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) struct hpsa_scsi_dev_t *dev[], int ndevices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) for (i = 0; i < ndevices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) if (dev[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) if (dev[i]->devtype != TYPE_DISK &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) dev[i]->devtype != TYPE_ZBC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) if (!is_logical_device(dev[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) * If offload is currently enabled, the RAID map and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) * phys_disk[] assignment *better* not be changing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) * because we would be changing ioaccel phys_disk[] pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) * on an ioaccel volume processing I/O requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) * If an ioaccel volume's status changed, either because it was
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) * re-configured and thus underwent a transformation, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) * because a drive failed, we would have received a state change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * request and ioaccel should have been turned off. When the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * transformation completes, we get another state change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * request to turn ioaccel back on. In this case, we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * to update the ioaccel information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * Thus: If it is not currently enabled, but will be after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) * the scan completes, make sure the ioaccel pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * are up to date.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) if (!dev[i]->offload_enabled && dev[i]->offload_to_be_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) if (!h->scsi_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) if (is_logical_device(device)) /* RAID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) rc = scsi_add_device(h->scsi_host, device->bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) device->target, device->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) else /* HBA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) rc = hpsa_add_sas_device(h->sas_host, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)
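/*
 * Count commands queued to @dev. Taking a reference on each command
 * (atomic_inc_return) keeps it from being freed while we examine it;
 * a refcount greater than one means some other path also holds the
 * command, i.e. it may still be outstanding. cmd_free() drops our
 * reference.
 */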
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) static int hpsa_find_outstanding_commands_for_dev(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) struct hpsa_scsi_dev_t *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) struct CommandList *c = h->cmd_pool + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) int refcount = atomic_inc_return(&c->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) dev->scsi3addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) spin_lock_irqsave(&h->lock, flags); /* Implied MB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) if (!hpsa_is_cmd_idle(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) ++count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849)
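/*
 * Poll roughly once per second until the device has no outstanding
 * commands, giving up after NUM_WAIT polls; external (pass-through
 * RAID) devices use HPSA_EH_PTRAID_TIMEOUT as the poll limit instead.
 * Used before removing a device so in-flight commands can drain.
 */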
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) #define NUM_WAIT 20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) static void hpsa_wait_for_outstanding_commands_for_dev(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) struct hpsa_scsi_dev_t *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) int cmds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) int waits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) int num_wait = NUM_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) if (device->external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) num_wait = HPSA_EH_PTRAID_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) cmds = hpsa_find_outstanding_commands_for_dev(h, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (cmds == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) if (++waits > num_wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) if (waits > num_wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) "%s: removing device [%d:%d:%d:%d] with %d outstanding commands!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) device->bus, device->target, device->lun, cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) static void hpsa_remove_device(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) struct hpsa_scsi_dev_t *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct scsi_device *sdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) if (!h->scsi_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * Allow for commands to drain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) device->removed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) hpsa_wait_for_outstanding_commands_for_dev(h, device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) if (is_logical_device(device)) { /* RAID */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) sdev = scsi_device_lookup(h->scsi_host, device->bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) device->target, device->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) if (sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * We don't expect to get here. Future commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * to this device will get a selection timeout as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * if the device were gone.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) hpsa_show_dev_msg(KERN_WARNING, h, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) "didn't find device for removal.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) } else { /* HBA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) hpsa_remove_sas_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) static void adjust_hpsa_scsi_table(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) struct hpsa_scsi_dev_t *sd[], int nsds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) /* sd contains scsi3 addresses and devtypes, and inquiry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * data. This function takes what's in sd to be the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) * reality and updates h->dev[] to reflect that reality.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) int i, entry, device_change, changes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) struct hpsa_scsi_dev_t *csd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) struct hpsa_scsi_dev_t **added, **removed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) int nadded, nremoved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) * A reset can cause a device status to change;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) * re-schedule the scan to see what happened.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) spin_lock_irqsave(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) if (h->reset_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) spin_unlock_irqrestore(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) spin_unlock_irqrestore(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) added = kcalloc(HPSA_MAX_DEVICES, sizeof(*added), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) removed = kcalloc(HPSA_MAX_DEVICES, sizeof(*removed), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) if (!added || !removed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) dev_warn(&h->pdev->dev, "out of memory in adjust_hpsa_scsi_table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) goto free_and_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) spin_lock_irqsave(&h->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) /* find any devices in h->dev[] that are not in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) * sd[] and remove them from h->dev[], and for any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) * devices which have changed, remove the old device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) * info and add the new device info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) * If minor device attributes change, just update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) * the existing device structure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) nremoved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) nadded = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) while (i < h->ndevices) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) csd = h->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (device_change == DEVICE_NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) changes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) hpsa_scsi_remove_entry(h, i, removed, &nremoved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) continue; /* remove ^^^, hence i not incremented */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) } else if (device_change == DEVICE_CHANGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) changes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) hpsa_scsi_replace_entry(h, i, sd[entry],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) added, &nadded, removed, &nremoved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) /* Set it to NULL to prevent it from being freed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) * at the bottom of hpsa_update_scsi_devices()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) sd[entry] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) } else if (device_change == DEVICE_UPDATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) hpsa_scsi_update_entry(h, i, sd[entry]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) }
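/*
 * First pass done: every entry left in h->dev[] was either found
 * unchanged in sd[], updated in place, or replaced.
 */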
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) /* Now, make sure every device listed in sd[] is also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) * listed in h->dev[], adding them if they aren't found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) for (i = 0; i < nsds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) if (!sd[i]) /* if already added above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) * as the SCSI mid-layer does not handle such devices well.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) * It relentlessly loops sending TUR at 3Hz, then READ(10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) * at 160Hz, and prevents the system from coming up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) if (sd[i]->volume_offline) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) hpsa_show_volume_status(h, sd[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) device_change = hpsa_scsi_find_entry(sd[i], h->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) h->ndevices, &entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) if (device_change == DEVICE_NOT_FOUND) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) changes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) sd[i] = NULL; /* prevent from being freed later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) } else if (device_change == DEVICE_CHANGED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) /* should never happen... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) changes++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) "device unexpectedly changed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) /* but if it does happen, we just ignore that device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) * Now that h->dev[]->phys_disk[] is coherent, we can enable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) * any logical drives that need it enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) * The raid map should be current by now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) * We are updating the device list used for I/O requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) for (i = 0; i < h->ndevices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) if (h->dev[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) spin_unlock_irqrestore(&h->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) /* Monitor devices which are in one of several NOT READY states to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * brought online later. This must be done without holding h->devlock,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) * so don't touch h->dev[]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) for (i = 0; i < nsds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (!sd[i]) /* if already added above. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) if (sd[i]->volume_offline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /* Don't notify the SCSI mid layer of any changes the first time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) * through, or if there are no changes; scsi_scan_host will do it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) * later the first time through.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) if (!changes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) goto free_and_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) /* Notify scsi mid layer of any removed devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) for (i = 0; i < nremoved; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) if (removed[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) if (removed[i]->expose_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) hpsa_remove_device(h, removed[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) kfree(removed[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) removed[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) /* Notify scsi mid layer of any added devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) for (i = 0; i < nadded; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) if (added[i] == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) if (!(added[i]->expose_device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) rc = hpsa_add_device(h, added[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) "addition failed %d, device not added.\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) /* now we have to remove it from h->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * since it didn't get added to scsi mid layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) fixup_botched_add(h, added[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) free_and_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) kfree(added);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) kfree(removed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) * Look up bus/target/lun and return the corresponding struct hpsa_scsi_dev_t *.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) * Assumes h->devlock is held.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) int bus, int target, int lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) struct hpsa_scsi_dev_t *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) for (i = 0; i < h->ndevices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) sd = h->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) if (sd->bus == bus && sd->target == target && sd->lun == lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) return sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103)
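/*
 * Called when the mid layer allocates a scsi_device. For physical
 * (HBA-mode) devices we map the target's sas_rphy back to our
 * hpsa_scsi_dev_t; otherwise we fall back to a bus/target/lun lookup.
 * sdev->hostdata links the mid-layer device to ours, and is left NULL
 * for devices we do not expose.
 */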
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) static int hpsa_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) struct hpsa_scsi_dev_t *sd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) h = sdev_to_hba(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) spin_lock_irqsave(&h->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (sdev_channel(sdev) == HPSA_PHYSICAL_DEVICE_BUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) struct scsi_target *starget;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) struct sas_rphy *rphy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) starget = scsi_target(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) rphy = target_to_rphy(starget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) sd = hpsa_find_device_by_sas_rphy(h, rphy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) if (sd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) sd->target = sdev_id(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) sd->lun = sdev->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) if (!sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) sdev_id(sdev), sdev->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (sd && sd->expose_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) atomic_set(&sd->ioaccel_cmds_out, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) sdev->hostdata = sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) sdev->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) spin_unlock_irqrestore(&h->devlock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) /* configure scsi device based on internal per-device structure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) #define CTLR_TIMEOUT (120 * HZ)
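/*
 * Queue depth comes from our per-device bookkeeping when available,
 * falling back to the host's can_queue. External, pass-through RAID
 * devices get a fixed depth (EXTERNAL_QD) and a dedicated
 * error-handling timeout (HPSA_EH_PTRAID_TIMEOUT); commands addressed
 * to the controller itself (is_hba_lunid) get CTLR_TIMEOUT.
 */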
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) static int hpsa_slave_configure(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) struct hpsa_scsi_dev_t *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) int queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) sd = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) sdev->no_uld_attach = !sd || !sd->expose_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) if (sd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) sd->was_removed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) queue_depth = sd->queue_depth != 0 ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) sd->queue_depth : sdev->host->can_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) if (sd->external) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) queue_depth = EXTERNAL_QD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) sdev->eh_timeout = HPSA_EH_PTRAID_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) blk_queue_rq_timeout(sdev->request_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) HPSA_EH_PTRAID_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) if (is_hba_lunid(sd->scsi3addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) sdev->eh_timeout = CTLR_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) blk_queue_rq_timeout(sdev->request_queue, CTLR_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) queue_depth = sdev->host->can_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) scsi_change_queue_depth(sdev, queue_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) static void hpsa_slave_destroy(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) struct hpsa_scsi_dev_t *hdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) hdev = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) if (hdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) hdev->was_removed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) if (!h->ioaccel2_cmd_sg_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) kfree(h->ioaccel2_cmd_sg_list[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) h->ioaccel2_cmd_sg_list[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) kfree(h->ioaccel2_cmd_sg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) h->ioaccel2_cmd_sg_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193)
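/*
 * Pre-allocate, per command, a chain block of h->maxsgentries ioaccel2
 * SG elements. When a request needs more SG entries than fit in the
 * command structure itself, the remainder spills into this block, which
 * is DMA-mapped at submit time (see hpsa_map_ioaccel2_sg_chain_block()).
 */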
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) if (h->chainsize <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) h->ioaccel2_cmd_sg_list =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) kcalloc(h->nr_cmds, sizeof(*h->ioaccel2_cmd_sg_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) if (!h->ioaccel2_cmd_sg_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) h->ioaccel2_cmd_sg_list[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) kmalloc_array(h->maxsgentries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) sizeof(*h->ioaccel2_cmd_sg_list[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) if (!h->ioaccel2_cmd_sg_list[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) goto clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) hpsa_free_ioaccel2_sg_chain_blocks(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (!h->cmd_sg_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) kfree(h->cmd_sg_list[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) h->cmd_sg_list[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) kfree(h->cmd_sg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) h->cmd_sg_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) if (h->chainsize <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) h->cmd_sg_list = kcalloc(h->nr_cmds, sizeof(*h->cmd_sg_list),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) if (!h->cmd_sg_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) h->cmd_sg_list[i] = kmalloc_array(h->chainsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) sizeof(*h->cmd_sg_list[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) if (!h->cmd_sg_list[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) goto clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) hpsa_free_sg_chain_blocks(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
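/*
 * Map the ioaccel2 spill block for @c. The block's length was recorded
 * in sg[0].length by the submit path; on a mapping failure the address
 * is zeroed so that the completion path knows there is nothing to
 * unmap.
 */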
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) struct io_accel2_cmd *cp, struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) struct ioaccel2_sg_element *chain_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) u64 temp64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) u32 chain_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) chain_size = le32_to_cpu(cp->sg[0].length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) if (dma_mapping_error(&h->pdev->dev, temp64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) /* prevent subsequent unmapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) cp->sg->address = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) cp->sg->address = cpu_to_le64(temp64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) }
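^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283)  * Note: dma_map_single() can fail (e.g. under swiotlb or IOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284)  * pressure), so the returned handle must be checked with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285)  * dma_mapping_error() before it is handed to the hardware.  Zeroing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286)  * cp->sg->address on failure leaves a sentinel so the completion path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287)  * never unmaps an address that was never mapped.  The general shape of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)  * the idiom (sketch only, not driver code):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)  *	dma_addr_t a = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292)  *	if (dma_mapping_error(dev, a))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)  *		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)  *	// ... hand "a" to the device, wait for completion ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295)  *	dma_unmap_single(dev, a, len, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296)  */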
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) struct io_accel2_cmd *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) struct ioaccel2_sg_element *chain_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) u64 temp64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) u32 chain_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) chain_sg = cp->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) temp64 = le64_to_cpu(chain_sg->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) chain_size = le32_to_cpu(cp->sg[0].length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) static int hpsa_map_sg_chain_block(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) struct SGDescriptor *chain_sg, *chain_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) u64 temp64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) u32 chain_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) chain_block = h->cmd_sg_list[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) chain_len = sizeof(*chain_sg) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) chain_sg->Len = cpu_to_le32(chain_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) if (dma_mapping_error(&h->pdev->dev, temp64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) /* prevent subsequent unmapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) chain_sg->Addr = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) chain_sg->Addr = cpu_to_le64(temp64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) }
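^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320)  * Worked example, assuming the submit path counts the chain descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)  * itself in Header.SGTotal: with h->max_cmd_sg_entries == 32, a request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322)  * carrying 40 data segments keeps 31 descriptors in the command, turns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)  * slot 31 into the HPSA_SG_CHAIN pointer, and reports SGTotal == 41.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324)  * chain_len above is then sizeof(*chain_sg) * (41 - 32), exactly the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325)  * 9 descriptors that spilled into the chain block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)  */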
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) struct SGDescriptor *chain_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) }
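^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333)  * Note: the SGTotal test above mirrors the condition under which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334)  * hpsa_map_sg_chain_block() created a mapping in the first place, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335)  * calling this for an unchained command is a harmless no-op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)  */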
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) /* Decode the various types of errors on ioaccel2 path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) * Return 1 for any error that should generate a RAID path retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) * Return 0 for errors that don't require a RAID path retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) static int handle_ioaccel_mode2_error(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) struct io_accel2_cmd *c2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) struct hpsa_scsi_dev_t *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) int data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) int retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) u32 ioaccel2_resid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) switch (c2->error_data.serv_response) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) case IOACCEL2_SERV_RESPONSE_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) switch (c2->error_data.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) if (cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) cmd->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) cmd->result |= SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) if (c2->error_data.data_present !=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) IOACCEL2_SENSE_DATA_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) memset(cmd->sense_buffer, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) /* copy the sense data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) data_len = c2->error_data.sense_data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) if (data_len > SCSI_SENSE_BUFFERSIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) data_len = SCSI_SENSE_BUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) if (data_len > sizeof(c2->error_data.sense_data_buff))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) data_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) sizeof(c2->error_data.sense_data_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) memcpy(cmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) c2->error_data.sense_data_buff, data_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 			retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) case IOACCEL2_SERV_RESPONSE_FAILURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) switch (c2->error_data.status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) case IOACCEL2_STATUS_SR_IO_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) case IOACCEL2_STATUS_SR_IO_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) case IOACCEL2_STATUS_SR_OVERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) case IOACCEL2_STATUS_SR_UNDERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) cmd->result = (DID_OK << 16); /* host byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) ioaccel2_resid = get_unaligned_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) &c2->error_data.resid_cnt[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) scsi_set_resid(cmd, ioaccel2_resid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) case IOACCEL2_STATUS_SR_INVALID_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) * Did an HBA disk disappear? We will eventually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) * get a state change event from the controller but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) * in the meantime, we need to tell the OS that the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) * HBA disk is no longer there and stop I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) * from going down. This allows the potential re-insert
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) * of the disk to get the same device node.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) if (dev->physical_device && dev->expose_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) dev->removed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) "%s: device is gone!\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 				 * Retry by sending down the RAID path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 				 * We will get an event from ctlr to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 				 * trigger rescan regardless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 				retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 		retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		retry = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) if (dev->in_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) retry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) return retry; /* retry on raid path? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) }
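^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)  * In short: a good COMPLETE status, an UNDERRUN (which only records the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455)  * residual count), an exposed physical device that has gone away (turned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456)  * into DID_NO_CONNECT), and the benign TMF responses (COMPLETE, SUCCESS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)  * WRONG_LUN) all finish on the ioaccel path; everything else, including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458)  * any unrecognized response, requests a RAID-path retry, and a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459)  * in reset never retries.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)  */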
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) static void hpsa_cmd_resolve_events(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) struct hpsa_scsi_dev_t *dev = c->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) * Reset c->scsi_cmd here so that the reset handler will know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) * this command has completed. Then, check to see if the handler is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) * waiting for this command, and, if so, wake it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) c->scsi_cmd = SCSI_CMD_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) mb(); /* Declare command idle before checking for pending events. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) if (dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) atomic_dec(&dev->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) if (dev->in_reset &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) atomic_read(&dev->commands_outstanding) <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) wake_up_all(&h->event_sync_wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) hpsa_cmd_resolve_events(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) cmd_tagged_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) static void hpsa_cmd_free_and_done(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) struct CommandList *c, struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) hpsa_cmd_resolve_and_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) if (cmd && cmd->scsi_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) INIT_WORK(&c->work, hpsa_command_resubmit_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) }
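^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495)  * Note: resubmission is deferred to h->resubmit_wq because this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)  * reached from the interrupt completion path, where the work of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497)  * rebuilding and reissuing the command (which may sleep) is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498)  * allowed.  queue_work_on() with raw_smp_processor_id() keeps the work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)  * on the completing CPU, presumably for cache locality rather than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500)  * correctness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501)  */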
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) static void process_ioaccel2_completion(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) struct CommandList *c, struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) struct hpsa_scsi_dev_t *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) /* check for good status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (likely(c2->error_data.serv_response == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) c2->error_data.status == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) cmd->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) return hpsa_cmd_free_and_done(h, c, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) * Any RAID offload error results in retry which will use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) * the normal I/O path so the controller can handle whatever is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) * wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) if (is_logical_device(dev) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) c2->error_data.serv_response ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) IOACCEL2_SERV_RESPONSE_FAILURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) if (c2->error_data.status ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) hpsa_turn_off_ioaccel_for_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) if (dev->in_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) cmd->result = DID_RESET << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) return hpsa_cmd_free_and_done(h, c, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) return hpsa_retry_cmd(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) if (handle_ioaccel_mode2_error(h, c, cmd, c2, dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) return hpsa_retry_cmd(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) return hpsa_cmd_free_and_done(h, c, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) /* Returns 0 on success, < 0 otherwise. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) struct CommandList *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) u8 tmf_status = cp->err_info->ScsiStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) switch (tmf_status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) case CISS_TMF_COMPLETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 		 * CISS_TMF_COMPLETE never happens; instead,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		 * ei->CommandStatus == 0 is reported for this case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) case CISS_TMF_SUCCESS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) case CISS_TMF_INVALID_FRAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) case CISS_TMF_NOT_SUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) case CISS_TMF_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) case CISS_TMF_WRONG_LUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) case CISS_TMF_OVERLAPPED_TAG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) tmf_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) return -tmf_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
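^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563)  * Note: on failure this returns the negated firmware TMF status, so a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564)  * caller that only needs pass/fail can simply test for non-zero, as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565)  * complete_scsi_command() does below:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567)  *	if (hpsa_evaluate_tmf_status(h, cp))	// TMF failed?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568)  *		cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)  */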
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) static void complete_scsi_command(struct CommandList *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) struct scsi_cmnd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) struct hpsa_scsi_dev_t *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) struct io_accel2_cmd *c2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) u8 sense_key;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) u8 asc; /* additional sense code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) u8 ascq; /* additional sense code qualifier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) unsigned long sense_data_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) ei = cp->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) cmd = cp->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) h = cp->h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) if (!cmd->device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) return hpsa_cmd_free_and_done(h, cp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) dev = cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) return hpsa_cmd_free_and_done(h, cp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) scsi_dma_unmap(cmd); /* undo the DMA mappings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) if ((cp->cmd_type == CMD_SCSI) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) hpsa_unmap_sg_chain_block(h, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) if ((cp->cmd_type == CMD_IOACCEL2) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) cmd->result = (DID_OK << 16); /* host byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) /* SCSI command has already been cleaned up in SML */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) if (dev->was_removed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) hpsa_cmd_resolve_and_free(h, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) if (dev->physical_device && dev->expose_device &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) dev->removed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) return hpsa_cmd_free_and_done(h, cp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) if (likely(cp->phys_disk != NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) * We check for lockup status here as it may be set for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 	 * fail_all_outstanding_cmds()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) /* DID_NO_CONNECT will prevent a retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) return hpsa_cmd_free_and_done(h, cp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) if (cp->cmd_type == CMD_IOACCEL2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) return process_ioaccel2_completion(h, cp, cmd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) scsi_set_resid(cmd, ei->ResidualCnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) if (ei->CommandStatus == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) return hpsa_cmd_free_and_done(h, cp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) /* For I/O accelerator commands, copy over some fields to the normal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) * CISS header used below for error handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) if (cp->cmd_type == CMD_IOACCEL1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) cp->Header.SGList = scsi_sg_count(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) IOACCEL1_IOFLAGS_CDBLEN_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) cp->Header.tag = c->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) /* Any RAID offload error results in retry which will use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) * the normal I/O path so the controller can handle whatever's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) * wrong.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) if (is_logical_device(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) dev->offload_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) return hpsa_retry_cmd(h, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) /* an error has occurred */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) switch (ei->CommandStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) case CMD_TARGET_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) cmd->result |= ei->ScsiStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) /* copy the sense data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) sense_data_size = SCSI_SENSE_BUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) sense_data_size = sizeof(ei->SenseInfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) if (ei->SenseLen < sense_data_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) sense_data_size = ei->SenseLen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) if (ei->ScsiStatus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) decode_sense_data(ei->SenseInfo, sense_data_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) &sense_key, &asc, &ascq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) switch (sense_key) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) case ABORTED_COMMAND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) cmd->result |= DID_SOFT_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) case UNIT_ATTENTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) if (asc == 0x3F && ascq == 0x0E)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) case ILLEGAL_REQUEST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) if (asc == 0x25 && ascq == 0x00) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) dev->removed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 		/* Problem was not a check condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) * Pass it up to the upper layers...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) if (ei->ScsiStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 			dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 				"cp %p has status 0x%x, Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, Returning result: 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 				cp, ei->ScsiStatus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 				sense_key, asc, ascq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 				cmd->result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) } else { /* scsi status is zero??? How??? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 			dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 				"cp %p SCSI status was 0. Returning no connection.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 				cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) /* Ordinarily, this case should never happen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) * but there is a bug in some released firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) * revisions that allows it to happen if, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) * example, a 4100 backplane loses power and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) * the tape drive is in it. We assume that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) * it's a fatal error of some kind because we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) * can't show that it wasn't. We will make it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) * look like selection timeout since that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) * the most common reason for this to occur,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) * and it's severe enough.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) case CMD_DATA_OVERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) "CDB %16phN data overrun\n", cp->Request.CDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) case CMD_INVALID: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) /* print_bytes(cp, sizeof(*cp), 1, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) print_cmd(cp); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) /* We get CMD_INVALID if you address a non-existent device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) * instead of a selection timeout (no response). You will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) * see this if you yank out a drive, then try to access it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) * This is kind of a shame because it means that any other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) * CMD_INVALID (e.g. driver bug) will get interpreted as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) * missing target. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) case CMD_PROTOCOL_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) cp->Request.CDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) case CMD_HARDWARE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) cp->Request.CDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) case CMD_CONNECTION_LOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) cp->Request.CDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) case CMD_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) cmd->result = DID_ABORT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) case CMD_ABORT_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) cp->Request.CDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) case CMD_UNSOLICITED_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) cp->Request.CDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) case CMD_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) cmd->result = DID_TIME_OUT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) cp->Request.CDB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) case CMD_UNABORTABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) dev_warn(&h->pdev->dev, "Command unabortable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) case CMD_TMF_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) case CMD_IOACCEL_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) /* This only handles the direct pass-through case since RAID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) * offload is handled above. Just attempt a retry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) cmd->result = DID_SOFT_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) "cp %p had HP SSD Smart Path error\n", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) cmd->result = DID_ERROR << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) cp, ei->CommandStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) return hpsa_cmd_free_and_done(h, cp, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
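^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802)  * Note: every exit from complete_scsi_command() funnels through exactly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803)  * one of hpsa_retry_cmd() (resubmit on the RAID path),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804)  * hpsa_cmd_resolve_and_free() (device already torn down in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)  * midlayer), or hpsa_cmd_free_and_done() (final completion to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806)  * midlayer), so the command is never left allocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807)  */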
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) int sg_used, enum dma_data_direction data_direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) for (i = 0; i < sg_used; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) le32_to_cpu(c->SG[i].Len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) static int hpsa_map_one(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) struct CommandList *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) size_t buflen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) enum dma_data_direction data_direction)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) u64 addr64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) if (buflen == 0 || data_direction == DMA_NONE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) cp->Header.SGList = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) cp->Header.SGTotal = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) if (dma_mapping_error(&pdev->dev, addr64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) /* Prevent subsequent unmap of something never mapped */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) cp->Header.SGList = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) cp->Header.SGTotal = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) cp->SG[0].Addr = cpu_to_le64(addr64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) cp->SG[0].Len = cpu_to_le32(buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) cp->Header.SGList = 1; /* no. SGs contig in this cmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) }
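^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842)  * Note: hpsa_map_one() produces at most one SG entry, which is why the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843)  * teardown in hpsa_scsi_do_simple_cmd_with_retry() below is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844)  * hpsa_pci_unmap(pdev, c, 1, dir).  A simplified sketch of the round
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845)  * trip for an internally issued command (assumed flow, condensed from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846)  * the helpers in this file):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848)  *	if (hpsa_map_one(h->pdev, c, buf, len, DMA_FROM_DEVICE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849)  *		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850)  *	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851)  *					DEFAULT_TIMEOUT);	// unmaps on exit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)  */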
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) #define NO_TIMEOUT ((unsigned long) -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) #define DEFAULT_TIMEOUT 30000 /* milliseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) DECLARE_COMPLETION_ONSTACK(wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) c->waiting = &wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) __enqueue_cmd_and_start_io(h, c, reply_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (timeout_msecs == NO_TIMEOUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) /* TODO: get rid of this no-timeout thing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) wait_for_completion_io(&wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) return IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) if (!wait_for_completion_io_timeout(&wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) msecs_to_jiffies(timeout_msecs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) dev_warn(&h->pdev->dev, "Command timed out.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) return IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) }
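^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864)  * Note: wait_for_completion_io_timeout() returns 0 on timeout and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865)  * remaining jiffies otherwise, hence the negated test above; and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866)  * msecs_to_jiffies() rounds up, so timeout_msecs is a lower bound on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867)  * the real wait.  On -ETIMEDOUT the controller may still own and later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)  * complete the command, so callers must treat a timed-out command with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869)  * care rather than reuse it immediately.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870)  */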
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) int reply_queue, unsigned long timeout_msecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) if (unlikely(lockup_detected(h))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) return IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) static u32 lockup_detected(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) u32 rc, *lockup_detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) cpu = get_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) rc = *lockup_detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) put_cpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) }
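^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886)  * Note: h->lockup_detected is a per-cpu u32; the detection path sets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887)  * every CPU's copy (see set_lockup_detected_for_all_cpus() elsewhere in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888)  * this driver), so reading the current CPU's copy is sufficient here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)  * The get_cpu()/put_cpu() pair only disables preemption across the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890)  * per-cpu dereference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891)  */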
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) #define MAX_DRIVER_CMD_RETRIES 25
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) struct CommandList *c, enum dma_data_direction data_direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) unsigned long timeout_msecs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) int backoff_time = 10, retry_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) memset(c->err_info, 0, sizeof(*c->err_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) timeout_msecs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) retry_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) if (retry_count > 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) msleep(backoff_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) if (backoff_time < 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) backoff_time *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) } while ((check_for_unit_attention(h, c) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) check_for_busy(h, c)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) retry_count <= MAX_DRIVER_CMD_RETRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) hpsa_pci_unmap(h->pdev, c, 1, data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) if (retry_count > MAX_DRIVER_CMD_RETRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) }
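^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915)  * Worked example: the first three retries are immediate, then the loop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916)  * sleeps 10, 20, 40, ..., 640 ms.  The "< 1000" check runs after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917)  * sleep, so 640 still doubles and the steady-state sleep settles at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918)  * 1280 ms rather than 1000.  With MAX_DRIVER_CMD_RETRIES == 25 that is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)  * on the order of 20 seconds of backoff before -EIO, and the loop only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920)  * persists at all for unit-attention and busy responses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)  */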
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) const u8 *cdb = c->Request.CDB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) const u8 *lun = c->Header.LUN.LunAddrBytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) dev_warn(&h->pdev->dev, "%s: LUN:%8phN CDB:%16phN\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) txt, lun, cdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) }
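^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925)  * Note: %phN is the printk extension that dumps a fixed number of bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)  * as hex with no separators, so "LUN:%8phN CDB:%16phN" prints the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927)  * 8-byte LUN address and the 16-byte CDB from one format string without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928)  * any manual loops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929)  */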
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) static void hpsa_scsi_interpret_error(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) struct CommandList *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) const struct ErrorInfo *ei = cp->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) struct device *d = &cp->h->pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) u8 sense_key, asc, ascq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) int sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) switch (ei->CommandStatus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) case CMD_TARGET_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) if (ei->SenseLen > sizeof(ei->SenseInfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) sense_len = sizeof(ei->SenseInfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) sense_len = ei->SenseLen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) decode_sense_data(ei->SenseInfo, sense_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) &sense_key, &asc, &ascq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) hpsa_print_cmd(h, "SCSI status", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) sense_key, asc, ascq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) if (ei->ScsiStatus == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 			dev_warn(d, "SCSI status is abnormally zero. (probably indicates selection timeout reported incorrectly due to a known firmware bug, circa July, 2001.)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) case CMD_DATA_OVERRUN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) hpsa_print_cmd(h, "overrun condition", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) case CMD_INVALID: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 		/* controller unfortunately reports SCSI passthrus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) * to non-existent targets as invalid commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) hpsa_print_cmd(h, "invalid command", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) dev_warn(d, "probably means device no longer present\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) case CMD_PROTOCOL_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) hpsa_print_cmd(h, "protocol error", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) case CMD_HARDWARE_ERR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) hpsa_print_cmd(h, "hardware error", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) case CMD_CONNECTION_LOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) hpsa_print_cmd(h, "connection lost", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) case CMD_ABORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) hpsa_print_cmd(h, "aborted", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) case CMD_ABORT_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) hpsa_print_cmd(h, "abort failed", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) case CMD_UNSOLICITED_ABORT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) hpsa_print_cmd(h, "unsolicited abort", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) case CMD_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) hpsa_print_cmd(h, "timed out", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) case CMD_UNABORTABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) hpsa_print_cmd(h, "unabortable", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) case CMD_CTLR_LOCKUP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) hpsa_print_cmd(h, "controller lockup detected", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) hpsa_print_cmd(h, "unknown status", cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) dev_warn(d, "Unknown command status %x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) ei->CommandStatus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) u8 page, u8 *buf, size_t bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) int rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) if (fill_cmd(c, RECEIVE_DIAGNOSTIC, h, buf, bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008) page, scsi3addr, TYPE_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) hpsa_scsi_interpret_error(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) static u64 hpsa_get_enclosure_logical_identifier(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) u8 *scsi3addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029) u8 *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) u64 sa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) buf = kzalloc(1024, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) rc = hpsa_do_receive_diagnostic(h, scsi3addr, RECEIVE_DIAGNOSTIC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038) buf, 1024);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) sa = get_unaligned_be64(buf+12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) return sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) }
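^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051)  * Note: the identifier is read big-endian from offset 12 of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052)  * returned diagnostic page, per SCSI byte ordering.  Returning 0 for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053)  * both allocation failure and command failure means callers cannot tell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054)  * "no identifier" from a genuinely zero one, so 0 has to be an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)  * acceptable "unknown" value at the call sites.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056)  */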
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050) static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) u16 page, unsigned char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) unsigned char bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) int rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) page, scsi3addr, TYPE_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) hpsa_scsi_interpret_error(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078)
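/*
 * Send a reset message (type given by @reset_type) for @dev on
 * @reply_queue.  Returns 0 on success, nonzero if the command could
 * not be sent or the controller reported an error.
 */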
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) static int hpsa_send_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) u8 reset_type, int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) int rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) c->device = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) /* fill_cmd can't fail here, no data buffer to map. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) (void) fill_cmd(c, reset_type, h, NULL, 0, 0, dev->scsi3addr, TYPE_MSG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091) rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) dev_warn(&h->pdev->dev, "Failed to send reset command\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096) /* no unmap needed here because no data xfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) if (ei->CommandStatus != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) hpsa_scsi_interpret_error(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107)
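/*
 * Decide whether outstanding command @c is addressed to @dev/@scsi3addr,
 * accounting for the different addressing used by plain SCSI commands,
 * ioaccel (RAID-offloaded) commands, and TMF requests.
 */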
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) struct hpsa_scsi_dev_t *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) unsigned char *scsi3addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) bool match = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) if (hpsa_is_cmd_idle(c))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120) switch (c->cmd_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) case CMD_SCSI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) case CMD_IOCTL_PEND:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) sizeof(c->Header.LUN.LunAddrBytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) case CMD_IOACCEL1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) case CMD_IOACCEL2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) if (c->phys_disk == dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) /* HBA mode match */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) match = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) } else {
			/*
			 * Possible RAID mode -- check each phys dev.
			 * FIXME: Do we need to take out a lock here?  If
			 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
			 * instead.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) for (i = 0; i < dev->nphysical_disks && !match; i++) {
				/*
				 * FIXME: an alternate test might be
				 * match = dev->phys_disk[i]->ioaccel_handle
				 *	== c2->scsi_nexus;
				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142) match = dev->phys_disk[i] == c->phys_disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) case IOACCEL2_TMF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) for (i = 0; i < dev->nphysical_disks && !match; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149) match = dev->phys_disk[i]->ioaccel_handle ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) le32_to_cpu(ac->it_nexus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) case 0: /* The command is in the middle of being initialized. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155) match = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) c->cmd_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) return match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166)
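/*
 * Serialized via h->reset_mutex: send the reset, wait for the device's
 * outstanding commands to drain (or a controller lockup), then wait for
 * the device to become ready again.
 */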
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) u8 reset_type, int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172) /* We can really only handle one reset at a time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) return -EINTR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) rc = hpsa_send_reset(h, dev, reset_type, reply_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) if (!rc) {
		/* sending the reset request incremented commands_outstanding; undo it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) atomic_dec(&dev->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) wait_event(h->event_sync_wait_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) atomic_read(&dev->commands_outstanding) <= 0 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3184) lockup_detected(h));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3187) if (unlikely(lockup_detected(h))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3188) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3189) "Controller lockup detected during reset wait\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3190) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3193) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3194) rc = wait_for_device_to_become_ready(h, dev->scsi3addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3195)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3196) mutex_unlock(&h->reset_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3197) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3199)
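/*
 * Read the RAID level from the logical-volume geometry VPD page
 * (byte 8 of the response); RAID_UNKNOWN is reported on any failure.
 */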
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3200) static void hpsa_get_raid_level(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3201) unsigned char *scsi3addr, unsigned char *raid_level)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3202) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3203) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3204) unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3205)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3206) *raid_level = RAID_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3207) buf = kzalloc(64, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3208) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3209) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3211) if (!hpsa_vpd_page_supported(h, scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3212) HPSA_VPD_LV_DEVICE_GEOMETRY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3213) goto exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3215) rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3216) HPSA_VPD_LV_DEVICE_GEOMETRY, buf, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3217)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3218) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3219) *raid_level = buf[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3220) if (*raid_level > RAID_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3221) *raid_level = RAID_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3222) exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3223) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3224) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3226)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3227) #define HPSA_MAP_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3228) #ifdef HPSA_MAP_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3229) static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3230) struct raid_map_data *map_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3232) struct raid_map_disk_data *dd = &map_buff->data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3233) int map, row, col;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3234) u16 map_cnt, row_cnt, disks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3236) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3237) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3239) /* Show details only if debugging has been activated. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3240) if (h->raid_offload_debug < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3241) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3243) dev_info(&h->pdev->dev, "structure_size = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3244) le32_to_cpu(map_buff->structure_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3245) dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3246) le32_to_cpu(map_buff->volume_blk_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3247) dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3248) le64_to_cpu(map_buff->volume_blk_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3249) dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3250) map_buff->phys_blk_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3251) dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3252) map_buff->parity_rotation_shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3253) dev_info(&h->pdev->dev, "strip_size = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3254) le16_to_cpu(map_buff->strip_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3255) dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3256) le64_to_cpu(map_buff->disk_starting_blk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3257) dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3258) le64_to_cpu(map_buff->disk_blk_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3259) dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3260) le16_to_cpu(map_buff->data_disks_per_row));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3261) dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3262) le16_to_cpu(map_buff->metadata_disks_per_row));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3263) dev_info(&h->pdev->dev, "row_cnt = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3264) le16_to_cpu(map_buff->row_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3265) dev_info(&h->pdev->dev, "layout_map_count = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3266) le16_to_cpu(map_buff->layout_map_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3267) dev_info(&h->pdev->dev, "flags = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3268) le16_to_cpu(map_buff->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3269) dev_info(&h->pdev->dev, "encryption = %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3270) le16_to_cpu(map_buff->flags) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3271) RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3272) dev_info(&h->pdev->dev, "dekindex = %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3273) le16_to_cpu(map_buff->dekindex));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3274) map_cnt = le16_to_cpu(map_buff->layout_map_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3275) for (map = 0; map < map_cnt; map++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3276) dev_info(&h->pdev->dev, "Map%u:\n", map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3277) row_cnt = le16_to_cpu(map_buff->row_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3278) for (row = 0; row < row_cnt; row++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3279) dev_info(&h->pdev->dev, " Row%u:\n", row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3280) disks_per_row =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3281) le16_to_cpu(map_buff->data_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3282) for (col = 0; col < disks_per_row; col++, dd++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3283) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3284) " D%02u: h=0x%04x xor=%u,%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3285) col, dd->ioaccel_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3286) dd->xor_mult[0], dd->xor_mult[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3287) disks_per_row =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3288) le16_to_cpu(map_buff->metadata_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3289) for (col = 0; col < disks_per_row; col++, dd++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3290) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3291) " M%02u: h=0x%04x xor=%u,%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3292) col, dd->ioaccel_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3293) dd->xor_mult[0], dd->xor_mult[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3294) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3295) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3296) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3297) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3298) static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3299) __attribute__((unused)) int rc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3300) __attribute__((unused)) struct raid_map_data *map_buff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3301) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3302) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3303) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3304)
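/*
 * Fetch the volume's RAID map into this_device->raid_map and sanity
 * check that the reported structure_size fits the preallocated buffer.
 */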
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3305) static int hpsa_get_raid_map(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3306) unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3308) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3309) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3310) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3312) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3314) if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3315) sizeof(this_device->raid_map), 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3316) scsi3addr, TYPE_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3317) dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3318) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3319) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3321) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3322) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3323) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3324) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3325) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3326) if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3327) hpsa_scsi_interpret_error(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3328) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3329) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3331) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3332)
	/* TODO: dynamically allocate RAID map memory in the future */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3334) if (le32_to_cpu(this_device->raid_map.structure_size) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3335) sizeof(this_device->raid_map)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3336) dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3337) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3339) hpsa_debug_map_buff(h, rc, &this_device->raid_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3340) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3341) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3342) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3343) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3345)
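/*
 * BMIC "sense subsystem information": query the controller for
 * subsystem data (including the primary world wide id) for the
 * device selected by @bmic_device_index.
 */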
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3346) static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3347) unsigned char scsi3addr[], u16 bmic_device_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3348) struct bmic_sense_subsystem_info *buf, size_t bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3350) int rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3351) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3352) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3354) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3356) rc = fill_cmd(c, BMIC_SENSE_SUBSYSTEM_INFORMATION, h, buf, bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3357) 0, RAID_CTLR_LUNID, TYPE_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3358) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3359) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3360)
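	/* 16-bit device index: low byte in CDB[2], high byte in CDB[9] */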
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3361) c->Request.CDB[2] = bmic_device_index & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3362) c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3363)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3364) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3365) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3366) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3367) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3368) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3369) if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3370) hpsa_scsi_interpret_error(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3371) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3373) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3374) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3375) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3377)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3378) static int hpsa_bmic_id_controller(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3379) struct bmic_identify_controller *buf, size_t bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3380) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3381) int rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3382) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3383) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3385) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3387) rc = fill_cmd(c, BMIC_IDENTIFY_CONTROLLER, h, buf, bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3388) 0, RAID_CTLR_LUNID, TYPE_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3389) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3390) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3391)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3392) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3393) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3394) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3395) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3396) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3397) if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3398) hpsa_scsi_interpret_error(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3399) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3401) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3402) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3403) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3405)
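/*
 * BMIC "identify physical device" for @bmic_device_index, addressed to
 * the controller LUN.  The result (e.g. box_index, phys_connector,
 * active_path_number) feeds enclosure discovery.
 */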
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3406) static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3407) unsigned char scsi3addr[], u16 bmic_device_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3408) struct bmic_identify_physical_device *buf, size_t bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3410) int rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3411) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3412) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3414) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3415) rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3416) 0, RAID_CTLR_LUNID, TYPE_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3417) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3418) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3419)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3420) c->Request.CDB[2] = bmic_device_index & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3421) c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3422)
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
						NO_TIMEOUT);
	if (rc)
		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3425) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3426) if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3427) hpsa_scsi_interpret_error(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3428) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3430) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3431) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3432)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3433) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3435)
/*
 * Get enclosure information:
 * struct ReportExtendedLUNdata *rlep - used to look up the BMIC drive number
 * int rle_index - index of this device's entry within rlep
 * struct hpsa_scsi_dev_t *encl_dev - device entry for the enclosure
 * Uses BMIC identify physical device to determine the box_index.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3442) static void hpsa_get_enclosure_info(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3443) unsigned char *scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3444) struct ReportExtendedLUNdata *rlep, int rle_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3445) struct hpsa_scsi_dev_t *encl_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3447) int rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3448) struct CommandList *c = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3449) struct ErrorInfo *ei = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3450) struct bmic_sense_storage_box_params *bssbp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3451) struct bmic_identify_physical_device *id_phys = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3452) struct ext_report_lun_entry *rle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3453) u16 bmic_device_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3455) if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3456) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3457)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3458) rle = &rlep->LUN[rle_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3459)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3460) encl_dev->eli =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3461) hpsa_get_enclosure_logical_identifier(h, scsi3addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3463) bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3465) if (encl_dev->target == -1 || encl_dev->lun == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3466) rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3467) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3470) if (bmic_device_index == 0xFF00 || MASKED_DEVICE(&rle->lunid[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3471) rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3472) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3475) bssbp = kzalloc(sizeof(*bssbp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3476) if (!bssbp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3477) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3479) id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3480) if (!id_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3481) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3483) rc = hpsa_bmic_id_physical_device(h, scsi3addr, bmic_device_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3484) id_phys, sizeof(*id_phys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3485) if (rc) {
		dev_warn(&h->pdev->dev,
			"%s: id_phys failed, rc=%d, external=%d, bdi[0x%x]\n",
			__func__, rc, encl_dev->external, bmic_device_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3488) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3489) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3491) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3493) rc = fill_cmd(c, BMIC_SENSE_STORAGE_BOX_PARAMS, h, bssbp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3494) sizeof(*bssbp), 0, RAID_CTLR_LUNID, TYPE_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3496) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3497) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3498)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3499) if (id_phys->phys_connector[1] == 'E')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3500) c->Request.CDB[5] = id_phys->box_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3501) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3502) c->Request.CDB[5] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3503)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3504) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3505) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3506) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3507) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3509) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3510) if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3511) rc = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3512) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3515) encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3516) memcpy(&encl_dev->phys_connector[id_phys->active_path_number],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3517) bssbp->phys_connector, sizeof(bssbp->phys_connector));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3519) rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3520) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3521) kfree(bssbp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3522) kfree(id_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3524) if (c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3525) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3527) if (rc != IO_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3528) hpsa_show_dev_msg(KERN_INFO, h, encl_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3529) "Error, could not get enclosure information");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3532) static u64 hpsa_get_sas_address_from_report_physical(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3533) unsigned char *scsi3addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3535) struct ReportExtendedLUNdata *physdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3536) u32 nphysicals;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3537) u64 sa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3538) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3540) physdev = kzalloc(sizeof(*physdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3541) if (!physdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3542) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3544) if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3545) dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3546) kfree(physdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3547) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3548) }
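	/* LUNListLength is big-endian; each extended entry is 24 bytes */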
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3549) nphysicals = get_unaligned_be32(physdev->LUNListLength) / 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3551) for (i = 0; i < nphysicals; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3552) if (!memcmp(&physdev->LUN[i].lunid[0], scsi3addr, 8)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3553) sa = get_unaligned_be64(&physdev->LUN[i].wwid[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3554) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3557) kfree(physdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3559) return sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3561)
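/*
 * Fill in dev->sas_address: for the controller LUN it comes from BMIC
 * sense-subsystem-information (and is cached in h->sas_address); for
 * other devices it is taken from the extended report-physical-LUNs wwid.
 */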
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3562) static void hpsa_get_sas_address(struct ctlr_info *h, unsigned char *scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3563) struct hpsa_scsi_dev_t *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3565) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3566) u64 sa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3568) if (is_hba_lunid(scsi3addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3569) struct bmic_sense_subsystem_info *ssi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3571) ssi = kzalloc(sizeof(*ssi), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3572) if (!ssi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3573) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3575) rc = hpsa_bmic_sense_subsystem_information(h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3576) scsi3addr, 0, ssi, sizeof(*ssi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3577) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3578) sa = get_unaligned_be64(ssi->primary_world_wide_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3579) h->sas_address = sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3581)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3582) kfree(ssi);
	} else {
		sa = hpsa_get_sas_address_from_report_physical(h, scsi3addr);
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3586) dev->sas_address = sa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3587) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3588)
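/*
 * If the physical-LUN report contains a controller entry other than
 * this HBA, an external controller is attached: switch to discovery
 * polling and disable report-LUN-data caching.
 */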
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3589) static void hpsa_ext_ctrl_present(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3590) struct ReportExtendedLUNdata *physdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3592) u32 nphysicals;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3593) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3595) if (h->discovery_polling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3596) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3598) nphysicals = (get_unaligned_be32(physdev->LUNListLength) / 24) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3600) for (i = 0; i < nphysicals; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3601) if (physdev->LUN[i].device_type ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3602) BMIC_DEVICE_TYPE_CONTROLLER
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3603) && !is_hba_lunid(physdev->LUN[i].lunid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3604) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3605) "External controller present, activate discovery polling and disable rld caching\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3606) hpsa_disable_rld_caching(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3607) h->discovery_polling = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3608) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3612)
/* Check the device's supported-VPD-pages list (VPD page 0x00) for @page */
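/*
 * Typical caller pattern (sketch; mirrors hpsa_get_volume_status):
 *
 *	if (hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
 *		rc = hpsa_scsi_do_inquiry(h, scsi3addr,
 *			VPD_PAGE | HPSA_VPD_LV_STATUS, buf, HPSA_VPD_HEADER_SZ);
 */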
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3614) static bool hpsa_vpd_page_supported(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3615) unsigned char scsi3addr[], u8 page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3617) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3618) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3619) int pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3620) unsigned char *buf, bufsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3621)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3622) buf = kzalloc(256, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3623) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3624) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3626) /* Get the size of the page list first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3627) rc = hpsa_scsi_do_inquiry(h, scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3628) VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3629) buf, HPSA_VPD_HEADER_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3630) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3631) goto exit_unsupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3632) pages = buf[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3633) if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3634) bufsize = pages + HPSA_VPD_HEADER_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3635) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3636) bufsize = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3638) /* Get the whole VPD page list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3639) rc = hpsa_scsi_do_inquiry(h, scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3640) VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3641) buf, bufsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3642) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3643) goto exit_unsupported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3645) pages = buf[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3646) for (i = 1; i <= pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3647) if (buf[3 + i] == page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3648) goto exit_supported;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3649) exit_unsupported:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3650) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3651) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3652) exit_supported:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3653) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3654) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3655) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3656)
/*
 * Called during a scan operation.
 * Sets ioaccel status on the new device list, not the existing device list.
 *
 * The device list used during I/O will be updated later in
 * adjust_hpsa_scsi_table.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3664) static void hpsa_get_ioaccel_status(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3665) unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3667) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3668) unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3669) u8 ioaccel_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3671) this_device->offload_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3672) this_device->offload_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3673) this_device->offload_to_be_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3674)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3675) buf = kzalloc(64, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3676) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3677) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3678) if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3679) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3680) rc = hpsa_scsi_do_inquiry(h, scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3681) VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3682) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3683) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3684)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3685) #define IOACCEL_STATUS_BYTE 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3686) #define OFFLOAD_CONFIGURED_BIT 0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3687) #define OFFLOAD_ENABLED_BIT 0x02
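	/* Byte 4 of the VPD page: bit 0 = offload configured, bit 1 = enabled */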
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3688) ioaccel_status = buf[IOACCEL_STATUS_BYTE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3689) this_device->offload_config =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3690) !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3691) if (this_device->offload_config) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3692) bool offload_enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3693) !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3694) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3695) * Check to see if offload can be enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3696) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3697) if (offload_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3698) rc = hpsa_get_raid_map(h, scsi3addr, this_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3699) if (rc) /* could not load raid_map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3700) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3701) this_device->offload_to_be_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3702) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3705) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3706) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3707) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3708) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3709)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3710) /* Get the device id from inquiry page 0x83 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3711) static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3712) unsigned char *device_id, int index, int buflen)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3713) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3714) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3715) unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3717) /* Does controller have VPD for device id? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3718) if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_DEVICE_ID))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3719) return 1; /* not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3721) buf = kzalloc(64, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3722) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3723) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3725) rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3726) HPSA_VPD_LV_DEVICE_ID, buf, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3727) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3728) if (buflen > 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3729) buflen = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3730) memcpy(device_id, &buf[8], buflen);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3733) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3734)
	return rc; /* 0 if we got the id, nonzero otherwise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3737)
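/*
 * REPORT LUNS addressed to the controller: @logical selects logical vs.
 * physical LUNs, and @extended_response (placed in CDB[1]) selects the
 * extended report format.  The response flag is verified against the
 * requested format.
 */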
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3738) static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3739) void *buf, int bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3740) int extended_response)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3741) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3742) int rc = IO_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3743) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3744) unsigned char scsi3addr[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3745) struct ErrorInfo *ei;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3747) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3749) /* address the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3750) memset(scsi3addr, 0, sizeof(scsi3addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3751) if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3752) buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3753) rc = -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3754) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3756) if (extended_response)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3757) c->Request.CDB[1] = extended_response;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3758) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3759) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3760) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3761) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3762) ei = c->err_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3763) if (ei->CommandStatus != 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3764) ei->CommandStatus != CMD_DATA_UNDERRUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3765) hpsa_scsi_interpret_error(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3766) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3767) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3768) struct ReportLUNdata *rld = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3770) if (rld->extended_response_flag != extended_response) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3771) if (!h->legacy_board) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3772) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3773) "report luns requested format %u, got %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3774) extended_response,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3775) rld->extended_response_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3776) rc = -EINVAL;
			} else {
				rc = -EOPNOTSUPP;
			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3781) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3782) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3783) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3785)
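/*
 * Report physical LUNs, preferring the extended (24-byte entry) format.
 * If the controller does not support it (-EOPNOTSUPP), fall back to the
 * legacy 8-byte format and copy the lunids into the extended buffer.
 */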
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3786) static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3787) struct ReportExtendedLUNdata *buf, int bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3788) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3789) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3790) struct ReportLUNdata *lbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3792) rc = hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3793) HPSA_REPORT_PHYS_EXTENDED);
	if (rc != -EOPNOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3795) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3797) /* REPORT PHYS EXTENDED is not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3798) lbuf = kzalloc(sizeof(*lbuf), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3799) if (!lbuf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3800) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3802) rc = hpsa_scsi_do_report_luns(h, 0, lbuf, sizeof(*lbuf), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3803) if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3804) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3805) u32 nphys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3807) /* Copy ReportLUNdata header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3808) memcpy(buf, lbuf, 8);
		nphys = get_unaligned_be32(lbuf->LUNListLength) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3810) for (i = 0; i < nphys; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3811) memcpy(buf->LUN[i].lunid, lbuf->LUN[i], 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3813) kfree(lbuf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3814) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3817) static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3818) struct ReportLUNdata *buf, int bufsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3820) return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3822)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3823) static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3824) int bus, int target, int lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3826) device->bus = bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3827) device->target = target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3828) device->lun = lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3831) /* Use VPD inquiry to get details of volume status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3832) static int hpsa_get_volume_status(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3833) unsigned char scsi3addr[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3835) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3836) int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3837) int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3838) unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3840) buf = kzalloc(64, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3841) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3842) return HPSA_VPD_LV_STATUS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3844) /* Does controller have VPD for logical volume status? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3845) if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3846) goto exit_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3848) /* Get the size of the VPD return buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3849) rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3850) buf, HPSA_VPD_HEADER_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3851) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3852) goto exit_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3853) size = buf[3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3855) /* Now get the whole VPD buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3856) rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3857) buf, size + HPSA_VPD_HEADER_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3858) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3859) goto exit_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3860) status = buf[4]; /* status byte */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3862) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3863) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3864) exit_failed:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3865) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3866) return HPSA_VPD_LV_STATUS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3867) }
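
/*
 * Buffer layout relied on by hpsa_get_volume_status() above (a sketch,
 * inferred from the SPC VPD header format and from how the code indexes
 * buf[]; not taken from the controller firmware spec):
 *
 *	buf[0]		peripheral qualifier / device type
 *	buf[1]		page code (HPSA_VPD_LV_STATUS)
 *	buf[2..3]	page length (only the low byte, buf[3], is used here)
 *	buf[4]		first payload byte: the logical volume status
 */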
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3869) /* Determine the offline status of a volume.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3870)  * Returns one of:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3871)  *  0 (not offline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3872)  *  0xff (offline for unknown reasons)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3873)  *  # (an integer code indicating one of several NOT READY states
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3874)  *     describing why the volume is to be kept offline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3875)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3876) static unsigned char hpsa_volume_offline(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3877) unsigned char scsi3addr[])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3879) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3880) unsigned char *sense;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3881) u8 sense_key, asc, ascq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3882) int sense_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3883) int rc, ldstat = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3884) u16 cmd_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3885) u8 scsi_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3886) #define ASC_LUN_NOT_READY 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3887) #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3888) #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3889)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3890) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3892) (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3893) rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3894) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3895) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3896) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3897) return HPSA_VPD_LV_STATUS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3899) sense = c->err_info->SenseInfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3900) if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3901) sense_len = sizeof(c->err_info->SenseInfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3902) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3903) sense_len = c->err_info->SenseLen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3904) decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3905) cmd_status = c->err_info->CommandStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3906) scsi_status = c->err_info->ScsiStatus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3907) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3909) /* Determine the reason for not ready state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3910) ldstat = hpsa_get_volume_status(h, scsi3addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3912) /* Keep volume offline in certain cases: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3913) switch (ldstat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3914) case HPSA_LV_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3915) case HPSA_LV_UNDERGOING_ERASE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3916) case HPSA_LV_NOT_AVAILABLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3917) case HPSA_LV_UNDERGOING_RPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3918) case HPSA_LV_PENDING_RPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3919) case HPSA_LV_ENCRYPTED_NO_KEY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3920) case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3921) case HPSA_LV_UNDERGOING_ENCRYPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3922) case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3923) case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3924) return ldstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3925) case HPSA_VPD_LV_STATUS_UNSUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3926) /* If VPD status page isn't available,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3927) * use ASC/ASCQ to determine state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3928) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3929) if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3930) (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3931) return ldstat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3932) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3933) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3934) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3935) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3936) return HPSA_LV_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3938)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3939) static int hpsa_update_device_info(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3940) unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3941) unsigned char *is_OBDR_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3943)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3944) #define OBDR_SIG_OFFSET 43
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3945) #define OBDR_TAPE_SIG "$DR-10"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3946) #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3947) #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
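
	/*
	 * Standard INQUIRY byte offsets consumed below (a sketch; see SPC
	 * for the authoritative layout):
	 *
	 *	inq_buff[0]	 peripheral device type (low 5 bits -> devtype)
	 *	inq_buff[2]	 version byte (stored in this_device->rev)
	 *	inq_buff[8..15]	 vendor identification
	 *	inq_buff[16..31] product identification (model)
	 *	inq_buff[43..48] "$DR-10" signature on OBDR tape devices
	 */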
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3949) unsigned char *inq_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3950) unsigned char *obdr_sig;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3951) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3953) inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3954) if (!inq_buff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3955) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3956) goto bail_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3957) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3959) /* Do an inquiry to the device to see what it is. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3960) if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3961) (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3962) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3963) "%s: inquiry failed, device will be skipped.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3964) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3965) rc = HPSA_INQUIRY_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3966) goto bail_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3969) scsi_sanitize_inquiry_string(&inq_buff[8], 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3970) scsi_sanitize_inquiry_string(&inq_buff[16], 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3971)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3972) this_device->devtype = (inq_buff[0] & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3973) memcpy(this_device->scsi3addr, scsi3addr, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3974) memcpy(this_device->vendor, &inq_buff[8],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3975) sizeof(this_device->vendor));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3976) memcpy(this_device->model, &inq_buff[16],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3977) sizeof(this_device->model));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3978) this_device->rev = inq_buff[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3979) memset(this_device->device_id, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3980) sizeof(this_device->device_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3981) if (hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3982) sizeof(this_device->device_id)) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3983) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3984) "hpsa%d: %s: can't get device id for [%d:%d:%d:%d]\t%s\t%.16s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3985) h->ctlr, __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3986) h->scsi_host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3987) this_device->bus, this_device->target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3988) this_device->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3989) scsi_device_type(this_device->devtype),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3990) this_device->model);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3991) rc = HPSA_LV_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3992) goto bail_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3995) if ((this_device->devtype == TYPE_DISK ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3996) this_device->devtype == TYPE_ZBC) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3997) is_logical_dev_addr_mode(scsi3addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3998) unsigned char volume_offline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3999)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4000) hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4001) if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4002) hpsa_get_ioaccel_status(h, scsi3addr, this_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4003) volume_offline = hpsa_volume_offline(h, scsi3addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4004) if (volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4005) h->legacy_board) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4006) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4007) * Legacy boards might not support volume status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4008) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4009) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4010) "C0:T%d:L%d Volume status not available, assuming online.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4011) this_device->target, this_device->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4012) volume_offline = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4014) this_device->volume_offline = volume_offline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4015) if (volume_offline == HPSA_LV_FAILED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4016) rc = HPSA_LV_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4017) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4018) "%s: LV failed, device will be skipped.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4019) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4020) goto bail_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4022) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4023) this_device->raid_level = RAID_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4024) this_device->offload_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4025) hpsa_turn_off_ioaccel_for_device(this_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4026) this_device->hba_ioaccel_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4027) this_device->volume_offline = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4028) this_device->queue_depth = h->nr_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4031) if (this_device->external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4032) this_device->queue_depth = EXTERNAL_QD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4034) if (is_OBDR_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4035) /* See if this is a One-Button-Disaster-Recovery device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4036) * by looking for "$DR-10" at offset 43 in inquiry data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4037) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4038) obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4039) *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4040) strncmp(obdr_sig, OBDR_TAPE_SIG,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4041) OBDR_SIG_LEN) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4043) kfree(inq_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4044) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4046) bail_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4047) kfree(inq_buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4048) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4051) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4052) * Helper function to assign bus, target, lun mapping of devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4053) * Logical drive target and lun are assigned at this time, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4054) * physical device lun and target assignment are deferred (assigned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4055) * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4056) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4057) static void figure_bus_target_lun(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4058) u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4059) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4060) u32 lunid = get_unaligned_le32(lunaddrbytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4062) if (!is_logical_dev_addr_mode(lunaddrbytes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4063) /* physical device, target and lun filled in later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4064) if (is_hba_lunid(lunaddrbytes)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4065) int bus = HPSA_HBA_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4067) if (!device->rev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4068) bus = HPSA_LEGACY_HBA_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4069) hpsa_set_bus_target_lun(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4070) bus, 0, lunid & 0x3fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4071) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4072) 			/* defer target, lun assignment for physical devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4073) 			hpsa_set_bus_target_lun(device, HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4074) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4075) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4076) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4077) /* It's a logical device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4078) if (device->external) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4079) hpsa_set_bus_target_lun(device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4080) HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4081) lunid & 0x00ff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4082) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4083) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4084) hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4085) 0, lunid & 0x3fff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4086) }
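
/*
 * Worked example of the lunid decoding above (a sketch): for an external
 * logical volume with lunid == 0x00120034, the target becomes
 * (lunid >> 16) & 0x3fff == 0x12 and the lun becomes lunid & 0x00ff == 0x34;
 * a local logical volume with the same lunid would instead land on
 * HPSA_RAID_VOLUME_BUS at target 0, lun == lunid & 0x3fff == 0x0034.
 */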
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4088) static int figure_external_status(struct ctlr_info *h, int raid_ctlr_position,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4089) int i, int nphysicals, int nlocal_logicals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4091) /* In report logicals, local logicals are listed first,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4092) * then any externals.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4093) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4094) int logicals_start = nphysicals + (raid_ctlr_position == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4096) if (i == raid_ctlr_position)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4097) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4099) if (i < logicals_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4100) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4102) /* i is in logicals range, but still within local logicals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4103) if ((i - nphysicals - (raid_ctlr_position == 0)) < nlocal_logicals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4104) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4106) return 1; /* it's an external lun */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4107) }
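
/*
 * Worked example (a sketch): with raid_ctlr_position == 0, nphysicals == 3
 * and nlocal_logicals == 2, logicals_start == 4 and the combined list maps:
 *
 *	i == 0		the RAID controller itself	-> 0 (not external)
 *	i == 1..3	physical devices		-> 0
 *	i == 4..5	local logical volumes		-> 0
 *	i >= 6		external logical volumes	-> 1
 */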
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4109) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4110) * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4111) * logdev. The number of luns in physdev and logdev are returned in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4112) * *nphysicals and *nlogicals, respectively.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4113) * Returns 0 on success, -1 otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4115) static int hpsa_gather_lun_info(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4116) struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4117) struct ReportLUNdata *logdev, u32 *nlogicals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4119) if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4120) dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4121) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4123) *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4124) if (*nphysicals > HPSA_MAX_PHYS_LUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4125) dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4126) HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4127) *nphysicals = HPSA_MAX_PHYS_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4129) if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4130) dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4131) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4133) *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4134) /* Reject Logicals in excess of our max capability. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4135) if (*nlogicals > HPSA_MAX_LUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4136) 		dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4137) 			"maximum logical LUNs (%d) exceeded. %d LUNs ignored.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4138) 			HPSA_MAX_LUN, *nlogicals - HPSA_MAX_LUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4140) *nlogicals = HPSA_MAX_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4141) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4142) if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4143) 		dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4144) 			"maximum logical + physical LUNs (%d) exceeded. %d LUNs ignored.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4145) 			HPSA_MAX_PHYS_LUN, *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4147) *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4149) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4150) }
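
/*
 * Example of the LUNListLength decoding above (a sketch): the first four
 * bytes of each report are a big-endian count of the LUN-entry bytes that
 * follow, so a physical report with LUNListLength == 48 holds
 * 48 / 24 == 2 extended entries, while a logical report of the same length
 * holds 48 / 8 == 6 plain 8-byte LUN addresses.
 */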
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4152) static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4153) int i, int nphysicals, int nlogicals,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4154) struct ReportExtendedLUNdata *physdev_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4155) struct ReportLUNdata *logdev_list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4157) 	/* Helper function: figure out where the LUN ID info for index i
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4158) 	 * comes from, given the lists of physical and logical devices and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4159) 	 * where in the combined list the RAID controller appears (first or last).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4160) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4162) int logicals_start = nphysicals + (raid_ctlr_position == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4163) int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4165) if (i == raid_ctlr_position)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4166) return RAID_CTLR_LUNID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4168) if (i < logicals_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4169) return &physdev_list->LUN[i -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4170) (raid_ctlr_position == 0)].lunid[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4172) if (i < last_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4173) return &logdev_list->LUN[i - nphysicals -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4174) (raid_ctlr_position == 0)][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4175) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4176) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4179) /* get physical drive ioaccel handle and queue depth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4180) static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4181) struct hpsa_scsi_dev_t *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4182) struct ReportExtendedLUNdata *rlep, int rle_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4183) struct bmic_identify_physical_device *id_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4185) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4186) struct ext_report_lun_entry *rle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4188) if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4189) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4191) rle = &rlep->LUN[rle_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4193) dev->ioaccel_handle = rle->ioaccel_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4194) if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4195) dev->hba_ioaccel_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4196) memset(id_phys, 0, sizeof(*id_phys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4197) rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4198) GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4199) sizeof(*id_phys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4200) #define DRIVE_CMDS_RESERVED_FOR_FW 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4201) #define DRIVE_QUEUE_DEPTH 7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4202) 	if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4203) 		/* Reserve space for FW operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4204) 		dev->queue_depth =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4205) 			le16_to_cpu(id_phys->current_queue_depth_limit) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4206) 				DRIVE_CMDS_RESERVED_FOR_FW;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4207) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4208) 		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4209) }
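
/*
 * Example (a sketch): if the BMIC identify returns
 * current_queue_depth_limit == 32, the exposed queue depth becomes
 * 32 - DRIVE_CMDS_RESERVED_FOR_FW == 30, keeping two slots free for
 * firmware-internal commands; if the identify fails, the conservative
 * DRIVE_QUEUE_DEPTH of 7 is used instead.
 */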
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4211) static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4212) struct ReportExtendedLUNdata *rlep, int rle_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4213) struct bmic_identify_physical_device *id_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4215) struct ext_report_lun_entry *rle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4217) if (rle_index < 0 || rle_index >= HPSA_MAX_PHYS_LUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4218) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4220) rle = &rlep->LUN[rle_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4222) if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4223) this_device->hba_ioaccel_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4224)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4225) memcpy(&this_device->active_path_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4226) &id_phys->active_path_number,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4227) sizeof(this_device->active_path_index));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4228) memcpy(&this_device->path_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4229) &id_phys->redundant_path_present_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4230) sizeof(this_device->path_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4231) memcpy(&this_device->box,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4232) &id_phys->alternate_paths_phys_box_on_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4233) sizeof(this_device->box));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4234) memcpy(&this_device->phys_connector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4235) &id_phys->alternate_paths_phys_connector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4236) sizeof(this_device->phys_connector));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4237) memcpy(&this_device->bay,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4238) &id_phys->phys_bay_in_box,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4239) sizeof(this_device->bay));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4242) /* get number of local logical disks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4243) static int hpsa_set_local_logical_count(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4244) struct bmic_identify_controller *id_ctlr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4245) u32 *nlocals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4246) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4247) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4249) if (!id_ctlr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4250) dev_warn(&h->pdev->dev, "%s: id_ctlr buffer is NULL.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4251) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4252) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4253) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4254) memset(id_ctlr, 0, sizeof(*id_ctlr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4255) rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4256) 	if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4257) 		if (id_ctlr->configured_logical_drive_count < 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4258) 			*nlocals = id_ctlr->configured_logical_drive_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4259) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4260) 			*nlocals = le16_to_cpu(id_ctlr->extended_logical_unit_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4261) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4262) 		*nlocals = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4264) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4267) static bool hpsa_is_disk_spare(struct ctlr_info *h, u8 *lunaddrbytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4268) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4269) struct bmic_identify_physical_device *id_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4270) bool is_spare = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4271) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4272)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4273) id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4274) if (!id_phys)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4275) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4277) rc = hpsa_bmic_id_physical_device(h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4278) lunaddrbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4279) GET_BMIC_DRIVE_NUMBER(lunaddrbytes),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4280) id_phys, sizeof(*id_phys));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4281) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4282) is_spare = (id_phys->more_flags >> 6) & 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4284) kfree(id_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4285) return is_spare;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4286) }
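
/*
 * Note on the spare test above (an assumption read off the code, not the
 * BMIC spec): only bit 6 of the identify-device more_flags field is
 * consulted, so e.g. more_flags == 0x40 reports a spare while
 * more_flags == 0xbf does not.
 */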
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4288) #define RPL_DEV_FLAG_NON_DISK 0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4289) #define RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED 0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4290) #define RPL_DEV_FLAG_UNCONFIG_DISK 0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4291)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4292) #define BMIC_DEVICE_TYPE_ENCLOSURE 6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4294) static bool hpsa_skip_device(struct ctlr_info *h, u8 *lunaddrbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4295) struct ext_report_lun_entry *rle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4297) u8 device_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4298) u8 device_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4300) if (!MASKED_DEVICE(lunaddrbytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4301) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4303) device_flags = rle->device_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4304) device_type = rle->device_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4306) if (device_flags & RPL_DEV_FLAG_NON_DISK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4307) if (device_type == BMIC_DEVICE_TYPE_ENCLOSURE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4308) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4309) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4311)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4312) if (!(device_flags & RPL_DEV_FLAG_UNCONFIG_DISK_REPORTING_SUPPORTED))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4313) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4315) if (device_flags & RPL_DEV_FLAG_UNCONFIG_DISK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4316) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4318) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4319) 	 * Spares may be spun down, and we do not want to send an INQUIRY
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4320) 	 * to a RAID-set spare drive, as that would spin it up. That is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4321) 	 * performance hit because I/O to the RAID device stops while the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4322) 	 * spin-up occurs, and the spin-up can take over 50 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4323) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4326) if (hpsa_is_disk_spare(h, lunaddrbytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4327) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4329) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4330) }
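
/*
 * Summary of the decision above (a sketch):
 *
 *	not a masked physical device			-> keep
 *	non-disk, and not an enclosure			-> skip
 *	unconfigured-disk reporting unsupported		-> keep
 *	unconfigured disk				-> keep
 *	configured disk that is a spare			-> skip (avoid spin-up)
 *	any other configured disk			-> keep
 */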
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4331)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4332) static void hpsa_update_scsi_devices(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4334) 	/* The idea here is that we could get notified that some devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4335) 	 * have changed, so we issue report-physical-LUNs and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4336) 	 * report-logical-LUNs commands and adjust our list of devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4337) 	 * accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4338) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4339) 	 * The scsi3addr's of devices won't change so long as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4340) 	 * adapter is not reset. That means we can rescan and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4341) 	 * tell which devices we already know about, vs. new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4342) 	 * devices, vs. disappearing devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4343) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4344) struct ReportExtendedLUNdata *physdev_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4345) struct ReportLUNdata *logdev_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4346) struct bmic_identify_physical_device *id_phys = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4347) struct bmic_identify_controller *id_ctlr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4348) u32 nphysicals = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4349) u32 nlogicals = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4350) u32 nlocal_logicals = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4351) u32 ndev_allocated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4352) struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4353) int ncurrent = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4354) int i, n_ext_target_devs, ndevs_to_allocate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4355) int raid_ctlr_position;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4356) bool physical_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4357) DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4359) currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4360) physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4361) logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4362) tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4363) id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4364) id_ctlr = kzalloc(sizeof(*id_ctlr), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4366) if (!currentsd || !physdev_list || !logdev_list ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4367) !tmpdevice || !id_phys || !id_ctlr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4368) dev_err(&h->pdev->dev, "out of memory\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4369) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4371) memset(lunzerobits, 0, sizeof(lunzerobits));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4373) h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4375) if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4376) logdev_list, &nlogicals)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4377) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4378) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4381) /* Set number of local logicals (non PTRAID) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4382) if (hpsa_set_local_logical_count(h, id_ctlr, &nlocal_logicals)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4383) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4384) "%s: Can't determine number of local logical devices.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4385) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4387)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4388) /* We might see up to the maximum number of logical and physical disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4389) * plus external target devices, and a device for the local RAID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4390) * controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4391) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4392) ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4394) hpsa_ext_ctrl_present(h, physdev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4396) /* Allocate the per device structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4397) for (i = 0; i < ndevs_to_allocate; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4398) if (i >= HPSA_MAX_DEVICES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4399) 			dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4400) 				"maximum devices (%d) exceeded. %d devices ignored.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4401) 				HPSA_MAX_DEVICES, ndevs_to_allocate - HPSA_MAX_DEVICES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4402) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4403) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4405) currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4406) if (!currentsd[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4407) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4408) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4410) ndev_allocated++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4412)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4413) if (is_scsi_rev_5(h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4414) raid_ctlr_position = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4415) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4416) raid_ctlr_position = nphysicals + nlogicals;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4418) /* adjust our table of devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4419) n_ext_target_devs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4420) for (i = 0; i < nphysicals + nlogicals + 1; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4421) u8 *lunaddrbytes, is_OBDR = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4422) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4423) int phys_dev_index = i - (raid_ctlr_position == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4424) bool skip_device = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4426) memset(tmpdevice, 0, sizeof(*tmpdevice));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4428) physical_device = i < nphysicals + (raid_ctlr_position == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4430) /* Figure out where the LUN ID info is coming from */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4431) lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4432) i, nphysicals, nlogicals, physdev_list, logdev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4434) /* Determine if this is a lun from an external target array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4435) tmpdevice->external =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4436) figure_external_status(h, raid_ctlr_position, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4437) nphysicals, nlocal_logicals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4440) * Skip over some devices such as a spare.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4441) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4442) if (phys_dev_index >= 0 && !tmpdevice->external &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4443) physical_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4444) skip_device = hpsa_skip_device(h, lunaddrbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4445) &physdev_list->LUN[phys_dev_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4446) if (skip_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4447) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4450) /* Get device type, vendor, model, device id, raid_map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4451) rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4452) &is_OBDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4453) if (rc == -ENOMEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4454) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4455) "Out of memory, rescan deferred.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4456) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4457) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4459) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4460) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4461) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4462) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4464) figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4465) this_device = currentsd[ncurrent];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4467) *this_device = *tmpdevice;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4468) this_device->physical_device = physical_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4470) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4471) * Expose all devices except for physical devices that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4472) * are masked.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4473) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4474) if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4475) this_device->expose_device = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4476) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4477) this_device->expose_device = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4480) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4481) * Get the SAS address for physical devices that are exposed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4483) if (this_device->physical_device && this_device->expose_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4484) hpsa_get_sas_address(h, lunaddrbytes, this_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4486) switch (this_device->devtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4487) case TYPE_ROM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4488) 			/* We don't *really* support actual CD-ROM devices,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4489) 			 * just "One Button Disaster Recovery" tape drives,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4490) 			 * which temporarily pretend to be CD-ROM drives.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4491) 			 * So we check that the device is really an OBDR tape
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4492) 			 * device by checking for "$DR-10" in bytes 43-48 of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4493) 			 * the inquiry data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4494) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4495) if (is_OBDR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4496) ncurrent++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4497) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4498) case TYPE_DISK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4499) case TYPE_ZBC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4500) if (this_device->physical_device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4501) /* The disk is in HBA mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4502) /* Never use RAID mapper in HBA mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4503) this_device->offload_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4504) hpsa_get_ioaccel_drive_info(h, this_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4505) physdev_list, phys_dev_index, id_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4506) hpsa_get_path_info(this_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4507) physdev_list, phys_dev_index, id_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4509) ncurrent++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4510) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4511) case TYPE_TAPE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4512) case TYPE_MEDIUM_CHANGER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4513) ncurrent++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4514) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4515) case TYPE_ENCLOSURE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4516) if (!this_device->external)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4517) hpsa_get_enclosure_info(h, lunaddrbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4518) physdev_list, phys_dev_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4519) this_device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4520) ncurrent++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4521) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4522) case TYPE_RAID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4523) /* Only present the Smartarray HBA as a RAID controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4524) * If it's a RAID controller other than the HBA itself
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4525) * (an external RAID controller, MSA500 or similar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4526) * don't present it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4527) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4528) if (!is_hba_lunid(lunaddrbytes))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4529) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4530) ncurrent++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4531) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4532) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4533) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4535) if (ncurrent >= HPSA_MAX_DEVICES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4536) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4539) if (h->sas_host == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4540) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4541)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4542) rc = hpsa_add_sas_host(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4543) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4544) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4545) "Could not add sas host %d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4546) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4550) adjust_hpsa_scsi_table(h, currentsd, ncurrent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4551) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4552) kfree(tmpdevice);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4553) for (i = 0; i < ndev_allocated; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4554) kfree(currentsd[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4555) kfree(currentsd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4556) kfree(physdev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4557) kfree(logdev_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4558) kfree(id_ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4559) kfree(id_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4562) static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4563) struct scatterlist *sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4565) u64 addr64 = (u64) sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4566) unsigned int len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4568) desc->Addr = cpu_to_le64(addr64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4569) desc->Len = cpu_to_le32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4570) desc->Ext = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4573) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4574) * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4575) * dma mapping and fills in the scatter gather entries of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4576) * hpsa command, cp.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4577) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4578) static int hpsa_scatter_gather(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4579) struct CommandList *cp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4580) struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4582) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4583) int use_sg, i, sg_limit, chained, last_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4584) struct SGDescriptor *curr_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4586) BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4588) use_sg = scsi_dma_map(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4589) if (use_sg < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4590) return use_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4592) if (!use_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4593) goto sglist_finished;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4594)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4595) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4596) 	 * If the number of entries is greater than the max for a single list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4597) 	 * then we have a chained list; we will set up all but one entry in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4598) 	 * first list (the last entry is saved for link information);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4599) 	 * otherwise, we don't have a chained list and we'll set up all of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4600) 	 * the entries in the one list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4601) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4602) curr_sg = cp->SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4603) chained = use_sg > h->max_cmd_sg_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4604) sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4605) last_sg = scsi_sg_count(cmd) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4606) scsi_for_each_sg(cmd, sg, sg_limit, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4607) hpsa_set_sg_descriptor(curr_sg, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4608) curr_sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4611) if (chained) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4612) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4613) * Continue with the chained list. Set curr_sg to the chained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4614) * list. Modify the limit to the total count less the entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4615) * we've already set up. Resume the scan at the list entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4616) * where the previous loop left off.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4617) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4618) curr_sg = h->cmd_sg_list[cp->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4619) sg_limit = use_sg - sg_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4620) for_each_sg(sg, sg, sg_limit, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4621) hpsa_set_sg_descriptor(curr_sg, sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4622) curr_sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4623) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4624) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4626) /* Back the pointer up to the last entry and mark it as "last". */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4627) (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4629) if (use_sg + chained > h->maxSG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4630) h->maxSG = use_sg + chained;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4632) if (chained) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4633) cp->Header.SGList = h->max_cmd_sg_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4634) cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4635) if (hpsa_map_sg_chain_block(h, cp)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4636) scsi_dma_unmap(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4637) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4639) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4640) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4642) sglist_finished:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4644) cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4645) cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4646) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4649) static inline void warn_zero_length_transfer(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4650) u8 *cdb, int cdb_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4651) const char *func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4653) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4654) "%s: Blocking zero-length request: CDB:%*phN\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4655) func, cdb_len, cdb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657)
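^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4657) /* Tells the caller to resubmit the request via the normal RAID path. */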
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4658) #define IO_ACCEL_INELIGIBLE 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4659) /* Zero-length transfers trigger hardware errors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4660) static bool is_zero_length_transfer(u8 *cdb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4662) u32 block_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4664) /* Block zero-length transfer sizes on certain commands. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4665) switch (cdb[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4666) case READ_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4667) case WRITE_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4668) case VERIFY: /* 0x2F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4669) case WRITE_VERIFY: /* 0x2E */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4670) block_cnt = get_unaligned_be16(&cdb[7]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4671) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4672) case READ_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4673) case WRITE_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4674) case VERIFY_12: /* 0xAF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4675) case WRITE_VERIFY_12: /* 0xAE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4676) block_cnt = get_unaligned_be32(&cdb[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4677) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4678) case READ_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4679) case WRITE_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4680) case VERIFY_16: /* 0x8F */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4681) block_cnt = get_unaligned_be32(&cdb[10]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4682) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4683) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4684) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4686)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4687) return block_cnt == 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4688) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689)
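^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689) /* Rewrite 6- and 12-byte read/write CDBs as their 10-byte equivalents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689)  * so the ioaccel path only ever sees 10- or 16-byte forms.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4689)  */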
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4690) static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4692) int is_write = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4693) u32 block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4694) u32 block_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4696) /* Perform some CDB fixups if needed using 10 byte reads/writes only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4697) switch (cdb[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4698) case WRITE_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4699) case WRITE_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4700) is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4701) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4702) case READ_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4703) case READ_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4704) if (*cdb_len == 6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4705) block = (((cdb[1] & 0x1F) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4706) (cdb[2] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4707) cdb[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) block_cnt = cdb[4];
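^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4708) /* In a 6-byte CDB, a transfer length of 0 means 256 blocks (SBC). */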
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4709) if (block_cnt == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4710) block_cnt = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4711) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4712) BUG_ON(*cdb_len != 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4713) block = get_unaligned_be32(&cdb[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4714) block_cnt = get_unaligned_be32(&cdb[6]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) }
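^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4715) /* A 10-byte CDB can carry only a 16-bit transfer length. */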
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4716) if (block_cnt > 0xffff)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4717) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4719) cdb[0] = is_write ? WRITE_10 : READ_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4720) cdb[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4721) cdb[2] = (u8) (block >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4722) cdb[3] = (u8) (block >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4723) cdb[4] = (u8) (block >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4724) cdb[5] = (u8) (block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4725) cdb[6] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4726) cdb[7] = (u8) (block_cnt >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4727) cdb[8] = (u8) (block_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4728) cdb[9] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4729) *cdb_len = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4730) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4731) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4732) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4735) static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4736) struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4737) u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4738) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4739) struct scsi_cmnd *cmd = c->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4740) struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4741) unsigned int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4742) unsigned int total_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4743) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4744) u64 addr64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4745) int use_sg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4746) struct SGDescriptor *curr_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4747) u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4749) /* TODO: implement chaining support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4750) if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4751) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4752) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4755) BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4757) if (is_zero_length_transfer(cdb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4758) warn_zero_length_transfer(h, cdb, cdb_len, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4759) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4760) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4763) if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4764) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4765) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4768) c->cmd_type = CMD_IOACCEL1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4770) /* Adjust the DMA address to point to the accelerated command buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4771) c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) (c->cmdindex * sizeof(*cp));
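^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4772) /* ioaccel command buffers must be 128-byte aligned. */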
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4773) BUG_ON(c->busaddr & 0x0000007F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4774)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4775) use_sg = scsi_dma_map(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4776) if (use_sg < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4777) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4778) return use_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4781) if (use_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4782) curr_sg = cp->SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4783) scsi_for_each_sg(cmd, sg, use_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4784) addr64 = (u64) sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4785) len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4786) total_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4787) curr_sg->Addr = cpu_to_le64(addr64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4788) curr_sg->Len = cpu_to_le32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4789) curr_sg->Ext = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4790) curr_sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) }
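^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4791) /* Mark the final SG element so the hardware knows where the list ends. */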
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4792) (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4793)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4794) switch (cmd->sc_data_direction) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4795) case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4796) control |= IOACCEL1_CONTROL_DATA_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4797) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4798) case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4799) control |= IOACCEL1_CONTROL_DATA_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4800) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4801) case DMA_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4802) control |= IOACCEL1_CONTROL_NODATAXFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4803) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4804) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4805) dev_err(&h->pdev->dev, "unknown data direction: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4806) cmd->sc_data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4807) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4808) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4810) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4811) control |= IOACCEL1_CONTROL_NODATAXFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4814) c->Header.SGList = use_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4815) /* Fill out the command structure to submit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4816) cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4817) cp->transfer_len = cpu_to_le32(total_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4818) cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4819) (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4820) cp->control = cpu_to_le32(control);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4821) memcpy(cp->CDB, cdb, cdb_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4822) memcpy(cp->CISS_LUN, scsi3addr, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4823) /* Tag was already set at init time. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4824) enqueue_cmd_and_start_io(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4825) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4829) * Queue a command directly to a device behind the controller using the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4830) * I/O accelerator path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4831) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4832) static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4833) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4835) struct scsi_cmnd *cmd = c->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4836) struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4838) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4839) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4841) c->phys_disk = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4843) if (dev->in_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4844) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4846) return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4847) cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4850) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4851) * Set encryption parameters for the ioaccel2 request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4852) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4853) static void set_encrypt_ioaccel2(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4854) struct CommandList *c, struct io_accel2_cmd *cp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4856) struct scsi_cmnd *cmd = c->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4857) struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4858) struct raid_map_data *map = &dev->raid_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4859) u64 first_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4861) /* Are we doing encryption on this device? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4862) if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4863) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4864) /* Set the data encryption key index. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4865) cp->dekindex = map->dekindex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4866)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4867) /* Set the encryption enable flag, encoded into direction field. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4868) cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4870) /* Set encryption tweak values based on the logical block address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4871)  * If the volume block size is 512, the tweak value is the LBA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4872)  * For other block sizes, the tweak is (LBA * block size) / 512.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4873)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4874) switch (cmd->cmnd[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4875) /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4876) case READ_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4877) case WRITE_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4878) first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4879) (cmd->cmnd[2] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4880) cmd->cmnd[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4881) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4882) case WRITE_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4883) case READ_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4884) /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4885) case WRITE_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4886) case READ_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4887) first_block = get_unaligned_be32(&cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4888) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4889) case WRITE_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4890) case READ_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4891) first_block = get_unaligned_be64(&cmd->cmnd[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4892) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4893) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4894) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4895) "ERROR: %s: opcode (0x%x) not supported for encryption\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4896) __func__, cmd->cmnd[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4897) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4898) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4901) if (le32_to_cpu(map->volume_blk_size) != 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4902) first_block = first_block *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) le32_to_cpu(map->volume_blk_size)/512;
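^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4903) /* e.g. a 4096-byte block size scales LBA 8 to tweak 64 (8 * 4096 / 512). */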
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4904)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4905) cp->tweak_lower = cpu_to_le32(first_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4906) cp->tweak_upper = cpu_to_le32(first_block >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4909) static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4910) struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4911) u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4913) struct scsi_cmnd *cmd = c->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4914) struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4915) struct ioaccel2_sg_element *curr_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4916) int use_sg, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4917) struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4918) u64 addr64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4919) u32 len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4920) u32 total_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4922) if (!cmd->device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4923) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4925) if (!cmd->device->hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4926) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4928) BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4930) if (is_zero_length_transfer(cdb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4931) warn_zero_length_transfer(h, cdb, cdb_len, __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4932) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4933) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4935)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4936) if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4937) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4938) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4941) c->cmd_type = CMD_IOACCEL2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4942) /* Adjust the DMA address to point to the accelerated command buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4943) c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) (c->cmdindex * sizeof(*cp));
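^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4944) /* ioaccel2 commands have the same 128-byte alignment requirement. */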
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4945) BUG_ON(c->busaddr & 0x0000007F);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4947) memset(cp, 0, sizeof(*cp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4948) cp->IU_type = IOACCEL2_IU_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4950) use_sg = scsi_dma_map(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4951) if (use_sg < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4952) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4953) return use_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4956) if (use_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) curr_sg = cp->sg;
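^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957) /* Too many entries to fit in the command itself: make element 0 a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957)  * chain descriptor pointing at the separately allocated SG list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4957)  */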
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4958) if (use_sg > h->ioaccel_maxsg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4959) addr64 = le64_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4960) h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4961) curr_sg->address = cpu_to_le64(addr64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4962) curr_sg->length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4963) curr_sg->reserved[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4964) curr_sg->reserved[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4965) curr_sg->reserved[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4966) curr_sg->chain_indicator = IOACCEL2_CHAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4968) curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4970) scsi_for_each_sg(cmd, sg, use_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4971) addr64 = (u64) sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4972) len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4973) total_len += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4974) curr_sg->address = cpu_to_le64(addr64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4975) curr_sg->length = cpu_to_le32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4976) curr_sg->reserved[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4977) curr_sg->reserved[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4978) curr_sg->reserved[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4979) curr_sg->chain_indicator = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4980) curr_sg++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4984) * Set the last s/g element bit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4985) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4986) (curr_sg - 1)->chain_indicator = IOACCEL2_LAST_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4988) switch (cmd->sc_data_direction) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4989) case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4990) cp->direction &= ~IOACCEL2_DIRECTION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4991) cp->direction |= IOACCEL2_DIR_DATA_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4992) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4993) case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4994) cp->direction &= ~IOACCEL2_DIRECTION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4995) cp->direction |= IOACCEL2_DIR_DATA_IN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4996) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4997) case DMA_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4998) cp->direction &= ~IOACCEL2_DIRECTION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4999) cp->direction |= IOACCEL2_DIR_NO_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5000) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5001) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5002) dev_err(&h->pdev->dev, "unknown data direction: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5003) cmd->sc_data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5004) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5005) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5007) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5008) cp->direction &= ~IOACCEL2_DIRECTION_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5009) cp->direction |= IOACCEL2_DIR_NO_DATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5012) /* Set encryption parameters, if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5013) set_encrypt_ioaccel2(h, c, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
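^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015) /* The tag encodes the command index so the completion handler can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015)  * look this command up directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5015)  */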
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5016) cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5017) memcpy(cp->cdb, cdb, sizeof(cp->cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5019) cp->data_len = cpu_to_le32(total_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5020) cp->err_ptr = cpu_to_le64(c->busaddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5021) offsetof(struct io_accel2_cmd, error_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5022) cp->err_len = cpu_to_le32(sizeof(cp->error_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5024) /* Set the SG count; when chained, sg[0] describes the chain block. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5025) if (use_sg > h->ioaccel_maxsg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5026) cp->sg_count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5027) cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5028) if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5029) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5030) scsi_dma_unmap(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5031) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5033) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5034) cp->sg_count = (u8) use_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5035)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5036) if (phys_disk->in_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5037) cmd->result = DID_RESET << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5038) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5041) enqueue_cmd_and_start_io(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5042) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5043) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5045) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5046) * Queue a command to the correct I/O accelerator path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5048) static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5049) struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5050) u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5052) if (!c->scsi_cmd->device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5053) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5054)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5055) if (!c->scsi_cmd->device->hostdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5056) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5058) if (phys_disk->in_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5059) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5061) /* Try to honor the device's queue depth */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5062) if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5063) phys_disk->queue_depth) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5064) atomic_dec(&phys_disk->ioaccel_cmds_out);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5065) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5066) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5067) if (h->transMethod & CFGTBL_Trans_io_accel1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5068) return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5069) cdb, cdb_len, scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5070) phys_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5071) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5072) return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5073) cdb, cdb_len, scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5074) phys_disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5077) static void raid_map_helper(struct raid_map_data *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5078) int offload_to_mirror, u32 *map_index, u32 *current_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5080) if (offload_to_mirror == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5081) /* use physical disk in the first mirrored group. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5082) *map_index %= le16_to_cpu(map->data_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5083) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5085) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5086) /* determine mirror group that *map_index indicates */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5087) *current_group = *map_index /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5088) le16_to_cpu(map->data_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5089) if (offload_to_mirror == *current_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5090) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5091) if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5092) /* select map index from next group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5093) *map_index += le16_to_cpu(map->data_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5094) (*current_group)++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5095) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5096) /* select map index from first group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5097) *map_index %= le16_to_cpu(map->data_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5098) *current_group = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5100) } while (offload_to_mirror != *current_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5101) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5104) * Attempt to perform offload RAID mapping for a logical volume I/O.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5105) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5106) static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5107) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5109) struct scsi_cmnd *cmd = c->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5110) struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5111) struct raid_map_data *map = &dev->raid_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5112) struct raid_map_disk_data *dd = &map->data[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5113) int is_write = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5114) u32 map_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5115) u64 first_block, last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5116) u32 block_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5117) u32 blocks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5118) u64 first_row, last_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5119) u32 first_row_offset, last_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5120) u32 first_column, last_column;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5121) u64 r0_first_row, r0_last_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5122) u32 r5or6_blocks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5123) u64 r5or6_first_row, r5or6_last_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5124) u32 r5or6_first_row_offset, r5or6_last_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5125) u32 r5or6_first_column, r5or6_last_column;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5126) u32 total_disks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5127) u32 stripesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5128) u32 first_group, last_group, current_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5129) u32 map_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5130) u32 disk_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5131) u64 disk_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5132) u32 disk_block_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5133) u8 cdb[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5134) u8 cdb_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5135) u16 strip_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5136) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5137) u64 tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5138) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5139) int offload_to_mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5141) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5142) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5144) if (dev->in_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5145) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5146)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5147) /* check for valid opcode, get LBA and block count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5148) switch (cmd->cmnd[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5149) case WRITE_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5150) is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5151) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5152) case READ_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5153) first_block = (((cmd->cmnd[1] & 0x1F) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5154) (cmd->cmnd[2] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5155) cmd->cmnd[3]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5156) block_cnt = cmd->cmnd[4];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5157) if (block_cnt == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5158) block_cnt = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5159) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5160) case WRITE_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5161) is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5162) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5163) case READ_10:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5164) first_block =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5165) (((u64) cmd->cmnd[2]) << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5166) (((u64) cmd->cmnd[3]) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5167) (((u64) cmd->cmnd[4]) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5168) cmd->cmnd[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5169) block_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5170) (((u32) cmd->cmnd[7]) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5171) cmd->cmnd[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5172) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5173) case WRITE_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5174) is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5175) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5176) case READ_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5177) first_block =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5178) (((u64) cmd->cmnd[2]) << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5179) (((u64) cmd->cmnd[3]) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5180) (((u64) cmd->cmnd[4]) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5181) cmd->cmnd[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5182) block_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5183) (((u32) cmd->cmnd[6]) << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5184) (((u32) cmd->cmnd[7]) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5185) (((u32) cmd->cmnd[8]) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5186) cmd->cmnd[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5187) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5188) case WRITE_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5189) is_write = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5190) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5191) case READ_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5192) first_block =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5193) (((u64) cmd->cmnd[2]) << 56) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5194) (((u64) cmd->cmnd[3]) << 48) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5195) (((u64) cmd->cmnd[4]) << 40) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5196) (((u64) cmd->cmnd[5]) << 32) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5197) (((u64) cmd->cmnd[6]) << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5198) (((u64) cmd->cmnd[7]) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5199) (((u64) cmd->cmnd[8]) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5200) cmd->cmnd[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5201) block_cnt =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5202) (((u32) cmd->cmnd[10]) << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5203) (((u32) cmd->cmnd[11]) << 16) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5204) (((u32) cmd->cmnd[12]) << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5205) cmd->cmnd[13];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5206) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5207) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5208) return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5209) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5210) last_block = first_block + block_cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5212) /* check for write to non-RAID-0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5213) if (is_write && dev->raid_level != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5214) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5216) /* check for invalid block or wraparound */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5217) if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5218) last_block < first_block)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5219) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5221) /* calculate stripe information for the request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5222) blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5223) le16_to_cpu(map->strip_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) strip_size = le16_to_cpu(map->strip_size);
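^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5224) /* e.g. 3 data disks with 128-block strips => 384 blocks per row. */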
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5225) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5226) tmpdiv = first_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5227) (void) do_div(tmpdiv, blocks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5228) first_row = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5229) tmpdiv = last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5230) (void) do_div(tmpdiv, blocks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5231) last_row = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5232) first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5233) last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5234) tmpdiv = first_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5235) (void) do_div(tmpdiv, strip_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5236) first_column = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5237) tmpdiv = last_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5238) (void) do_div(tmpdiv, strip_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5239) last_column = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5240) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5241) first_row = first_block / blocks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5242) last_row = last_block / blocks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5243) first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5244) last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5245) first_column = first_row_offset / strip_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5246) last_column = last_row_offset / strip_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5247) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5248)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5249) /* if this isn't a single row/column then give to the controller */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5250) if ((first_row != last_row) || (first_column != last_column))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5251) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5253) /* proceeding with driver mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5254) total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5255) le16_to_cpu(map->metadata_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5256) map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5257) le16_to_cpu(map->row_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) map_index = (map_row * total_disks_per_row) + first_column;
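^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5258) /* map->data[] is laid out row-major: (row * disks_per_row) + column. */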
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5260) switch (dev->raid_level) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5261) case HPSA_RAID_0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5262) break; /* nothing special to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5263) case HPSA_RAID_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5264) /* Handles load balancing across RAID 1 members
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5265)  * (2-drive R1, and R10 with an even number of drives).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5266)  * Appropriate for SSDs, but not optimal for HDDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5267)  * Ensure we have the correct raid_map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5268)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5269) if (le16_to_cpu(map->layout_map_count) != 2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5270) hpsa_turn_off_ioaccel_for_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5271) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) }
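^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5272) /* Alternate between the primary and mirror drives to spread reads. */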
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5273) if (dev->offload_to_mirror)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5274) map_index += le16_to_cpu(map->data_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5275) dev->offload_to_mirror = !dev->offload_to_mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5276) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5277) case HPSA_RAID_ADM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5278) /* Handles N-way mirrors (R1-ADM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5279)  * and R10 with a number of drives divisible by 3.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5280)  * Ensure we have the correct raid_map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5281)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5282) if (le16_to_cpu(map->layout_map_count) != 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5283) hpsa_turn_off_ioaccel_for_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5284) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5287) offload_to_mirror = dev->offload_to_mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5288) raid_map_helper(map, offload_to_mirror,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5289) &map_index, &current_group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5290) /* set mirror group to use next time */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5291) offload_to_mirror =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5292) (offload_to_mirror >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5293) le16_to_cpu(map->layout_map_count) - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5294) ? 0 : offload_to_mirror + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5295) dev->offload_to_mirror = offload_to_mirror;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5296) /* Avoid direct use of dev->offload_to_mirror within this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5297)  * function since multiple threads might simultaneously
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5298)  * increment it beyond the range of map->layout_map_count - 1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5299)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5300) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5301) case HPSA_RAID_5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5302) case HPSA_RAID_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5303) if (le16_to_cpu(map->layout_map_count) <= 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5304) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5306) /* Verify first and last block are in same RAID group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5307) r5or6_blocks_per_row =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5308) le16_to_cpu(map->strip_size) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5309) le16_to_cpu(map->data_disks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5310) if (r5or6_blocks_per_row == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5311) hpsa_turn_off_ioaccel_for_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5312) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5314) stripesize = r5or6_blocks_per_row *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) le16_to_cpu(map->layout_map_count);
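^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5315) /* e.g. 2 parity groups * 384 blocks per row => 768-block stripes. */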
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5316) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5317) tmpdiv = first_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5318) first_group = do_div(tmpdiv, stripesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5319) tmpdiv = first_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5320) (void) do_div(tmpdiv, r5or6_blocks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5321) first_group = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5322) tmpdiv = last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5323) last_group = do_div(tmpdiv, stripesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5324) tmpdiv = last_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5325) (void) do_div(tmpdiv, r5or6_blocks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5326) last_group = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5327) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5328) first_group = (first_block % stripesize) / r5or6_blocks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5329) last_group = (last_block % stripesize) / r5or6_blocks_per_row;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5330) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5331) if (first_group != last_group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5332) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5334) /* Verify request is in a single row of RAID 5/6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5335) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5336) tmpdiv = first_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5337) (void) do_div(tmpdiv, stripesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5338) first_row = r5or6_first_row = r0_first_row = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5339) tmpdiv = last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5340) (void) do_div(tmpdiv, stripesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5341) r5or6_last_row = r0_last_row = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5342) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5343) first_row = r5or6_first_row = r0_first_row =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5344) first_block / stripesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5345) r5or6_last_row = r0_last_row = last_block / stripesize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5346) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5347) if (r5or6_first_row != r5or6_last_row)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5348) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5350)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5351) /* Verify request is in a single column */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5352) #if BITS_PER_LONG == 32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5353) tmpdiv = first_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5354) first_row_offset = do_div(tmpdiv, stripesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5355) tmpdiv = first_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5356) first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5357) r5or6_first_row_offset = first_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5358) tmpdiv = last_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5359) r5or6_last_row_offset = do_div(tmpdiv, stripesize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5360) tmpdiv = r5or6_last_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5361) r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) /* strip_size is stored little-endian; convert as the 64-bit path does. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5362) tmpdiv = r5or6_first_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5363) (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5364) first_column = r5or6_first_column = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5365) tmpdiv = r5or6_last_row_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5366) (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5367) r5or6_last_column = tmpdiv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5368) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5369) first_row_offset = r5or6_first_row_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5370) (u32)((first_block % stripesize) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5371) r5or6_blocks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5373) r5or6_last_row_offset =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5374) (u32)((last_block % stripesize) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5375) r5or6_blocks_per_row);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5377) first_column = r5or6_first_column =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5378) r5or6_first_row_offset / le16_to_cpu(map->strip_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5379) r5or6_last_column =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5380) r5or6_last_row_offset / le16_to_cpu(map->strip_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5381) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5382) if (r5or6_first_column != r5or6_last_column)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5383) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5384)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5385) /* Request is eligible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5386) map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5387) le16_to_cpu(map->row_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5389) map_index = (first_group *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5390) (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5391) (map_row * total_disks_per_row) + first_column;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5392) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5393) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5394) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5396)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5397) if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5398) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5400) c->phys_disk = dev->phys_disk[map_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5401) if (!c->phys_disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5402) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403)
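^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5403) /* Translate the volume-relative LBA into an LBA on the chosen physical disk. */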
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5404) disk_handle = dd[map_index].ioaccel_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5405) disk_block = le64_to_cpu(map->disk_starting_blk) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5406) first_row * le16_to_cpu(map->strip_size) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5407) (first_row_offset - first_column *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5408) le16_to_cpu(map->strip_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5409) disk_block_cnt = block_cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5411) /* handle differing logical/physical block sizes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5412) if (map->phys_blk_shift) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5413) disk_block <<= map->phys_blk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5414) disk_block_cnt <<= map->phys_blk_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5416) BUG_ON(disk_block_cnt > 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5418) /* build the new CDB for the physical disk I/O */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5419) if (disk_block > 0xffffffff) {
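		/* LBA won't fit the 4-byte field of a 10-byte CDB; use the 16-byte form */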
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5420) cdb[0] = is_write ? WRITE_16 : READ_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5421) cdb[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5422) cdb[2] = (u8) (disk_block >> 56);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5423) cdb[3] = (u8) (disk_block >> 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5424) cdb[4] = (u8) (disk_block >> 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5425) cdb[5] = (u8) (disk_block >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5426) cdb[6] = (u8) (disk_block >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5427) cdb[7] = (u8) (disk_block >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5428) cdb[8] = (u8) (disk_block >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5429) cdb[9] = (u8) (disk_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5430) cdb[10] = (u8) (disk_block_cnt >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5431) cdb[11] = (u8) (disk_block_cnt >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5432) cdb[12] = (u8) (disk_block_cnt >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5433) cdb[13] = (u8) (disk_block_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5434) cdb[14] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5435) cdb[15] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5436) cdb_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5437) } else {
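		/* LBA fits in 4 bytes and the count in 2 (see BUG_ON above); 10-byte CDB suffices */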
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5438) cdb[0] = is_write ? WRITE_10 : READ_10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5439) cdb[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5440) cdb[2] = (u8) (disk_block >> 24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5441) cdb[3] = (u8) (disk_block >> 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5442) cdb[4] = (u8) (disk_block >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5443) cdb[5] = (u8) (disk_block);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5444) cdb[6] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5445) cdb[7] = (u8) (disk_block_cnt >> 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5446) cdb[8] = (u8) (disk_block_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5447) cdb[9] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5448) cdb_len = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5450) return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5451) dev->scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5452) dev->phys_disk[map_index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5455) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5456) * Submit commands down the "normal" RAID stack path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5457) * All callers of hpsa_ciss_submit must check lockup_detected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5458) * beforehand: both before (optionally) and after calling cmd_alloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5459) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5460) static int hpsa_ciss_submit(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5461) struct CommandList *c, struct scsi_cmnd *cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5462) struct hpsa_scsi_dev_t *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5463) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5464) cmd->host_scribble = (unsigned char *) c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5465) c->cmd_type = CMD_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5466) c->scsi_cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5467) c->Header.ReplyQueue = 0; /* unused in simple mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5468) memcpy(&c->Header.LUN.LunAddrBytes[0], &dev->scsi3addr[0], 8);
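	/*
	 * The tag encodes the command-pool index (shifted by
	 * DIRECT_LOOKUP_SHIFT) so the completion path can locate this
	 * command directly.
	 */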
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5469) c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5471) /* Fill in the request block... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5473) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5474) BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5475) c->Request.CDBLen = cmd->cmd_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5476) memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5477) switch (cmd->sc_data_direction) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5478) case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5479) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5480) TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5481) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5482) case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5483) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5484) TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5485) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5486) case DMA_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5487) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5488) TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5489) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5490) case DMA_BIDIRECTIONAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5491) /* This can happen if a buggy application issues a SCSI passthrough
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5492) * and sets both inlen and outlen to non-zero (see
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5493) * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command()).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5494) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5496) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5497) TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5498) /* This is technically wrong, and hpsa controllers should
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5499) * reject it with CMD_INVALID, which is the most correct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5500) * response, but non-fibre backends appear to let it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5501) * slide by, and give the same results as if this field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5502) * were set correctly. Either way is acceptable for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5503) * our purposes here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5506) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5508) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5509) dev_err(&h->pdev->dev, "unknown data direction: %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5510) cmd->sc_data_direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5511) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5512) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5515) if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5516) hpsa_cmd_resolve_and_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5517) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5519)
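	/* Check in_reset again: a reset may have begun since the caller's earlier check */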
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5520) if (dev->in_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5521) hpsa_cmd_resolve_and_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5522) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5525) c->device = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5527) enqueue_cmd_and_start_io(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5528) /* the command will come back via the interrupt handler in complete_scsi_command() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5529) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5530) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5532) static void hpsa_cmd_init(struct ctlr_info *h, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5533) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5535) dma_addr_t cmd_dma_handle, err_dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5536)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5537) /* Zero out all of the CommandList except the last field, refcount */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5538) memset(c, 0, offsetof(struct CommandList, refcount));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5539) c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5540) cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5541) c->err_info = h->errinfo_pool + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5542) memset(c->err_info, 0, sizeof(*c->err_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5543) err_dma_handle = h->errinfo_pool_dhandle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5544) + index * sizeof(*c->err_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5545) c->cmdindex = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5546) c->busaddr = (u32) cmd_dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5547) c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5548) c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5549) c->h = h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5550) c->scsi_cmd = SCSI_CMD_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5551) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5552)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5553) static void hpsa_preinitialize_commands(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5555) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5557) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5558) struct CommandList *c = h->cmd_pool + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5560) hpsa_cmd_init(h, i, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5561) atomic_set(&c->refcount, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5564)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5565) static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5566) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5568) dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5570) BUG_ON(c->cmdindex != index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5571)
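	/*
	 * Reset only per-request state; the wiring set up by
	 * hpsa_cmd_init() (err_info pointer, ErrDesc, cmdindex, h)
	 * is preserved.
	 */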
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5572) memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5573) memset(c->err_info, 0, sizeof(*c->err_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5574) c->busaddr = (u32) cmd_dma_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5576)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5577) static int hpsa_ioaccel_submit(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5578) struct CommandList *c, struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5580) struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5581) int rc = IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5582)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5583) if (!dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5584) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5586) if (dev->in_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5587) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5589) if (hpsa_simple_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5590) return IO_ACCEL_INELIGIBLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5592) cmd->host_scribble = (unsigned char *) c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5593)
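	/*
	 * Prefer the RAID-map accelerated path when offload is enabled;
	 * otherwise fall back to direct HBA ioaccel if the device
	 * supports it.
	 */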
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5594) if (dev->offload_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5595) hpsa_cmd_init(h, c->cmdindex, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5596) c->cmd_type = CMD_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5597) c->scsi_cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5598) c->device = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5599) rc = hpsa_scsi_ioaccel_raid_map(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5600) if (rc < 0) /* scsi_dma_map failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5601) rc = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5602) } else if (dev->hba_ioaccel_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5603) hpsa_cmd_init(h, c->cmdindex, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5604) c->cmd_type = CMD_SCSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5605) c->scsi_cmd = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5606) c->device = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5607) rc = hpsa_scsi_ioaccel_direct_map(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5608) if (rc < 0) /* scsi_dma_map failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5609) rc = SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5611) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5614) static void hpsa_command_resubmit_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5616) struct scsi_cmnd *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5617) struct hpsa_scsi_dev_t *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5618) struct CommandList *c = container_of(work, struct CommandList, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5620) cmd = c->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5621) dev = cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5622) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5623) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5624) return hpsa_cmd_free_and_done(c->h, c, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5627) if (dev->in_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5628) cmd->result = DID_RESET << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5629) return hpsa_cmd_free_and_done(c->h, c, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5631)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5632) if (c->cmd_type == CMD_IOACCEL2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5633) struct ctlr_info *h = c->h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5634) struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5635) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5636)
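		/*
		 * If the ioaccel command failed only because the device
		 * reported task set full, try the ioaccel path once more
		 * before falling back to the normal CISS path below.
		 */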
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5637) if (c2->error_data.serv_response ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5638) IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5639) rc = hpsa_ioaccel_submit(h, c, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5640) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5641) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5642) if (rc == SCSI_MLQUEUE_HOST_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5643) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5644) * If we get here, it means dma mapping failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5645) * Try again via scsi mid layer, which will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5646) * then get SCSI_MLQUEUE_HOST_BUSY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5647) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5648) cmd->result = DID_IMM_RETRY << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5649) return hpsa_cmd_free_and_done(h, c, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5651) /* else, fall thru and resubmit down CISS path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5654) hpsa_cmd_partial_init(c->h, c->cmdindex, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5655) if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5656) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5657) * If we get here, it means dma mapping failed. Try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5658) * again via scsi mid layer, which will then get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5659) * SCSI_MLQUEUE_HOST_BUSY.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5660) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5661) * hpsa_ciss_submit will have already freed c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5662) * if it encountered a dma mapping failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5663) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5664) cmd->result = DID_IMM_RETRY << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5665) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5667) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5669) /* Runs in host_lock-less mode: struct Scsi_Host->host_lock is not held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5670) static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5672) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5673) struct hpsa_scsi_dev_t *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5674) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5675) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5677) /* Get the ptr to our adapter structure out of cmd->host. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5678) h = sdev_to_hba(cmd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5680) BUG_ON(cmd->request->tag < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5682) dev = cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5683) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5684) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5685) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5686) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5689) if (dev->removed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5690) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5691) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5692) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5694)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5695) if (unlikely(lockup_detected(h))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5696) cmd->result = DID_NO_CONNECT << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5697) cmd->scsi_done(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5698) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5699) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5700)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5701) if (dev->in_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5702) return SCSI_MLQUEUE_DEVICE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5704) c = cmd_tagged_alloc(h, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5705) if (c == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5706) return SCSI_MLQUEUE_DEVICE_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5708) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5709) * This is necessary because the SCSI midlayer (SML) doesn't zero out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5710) * this field during error recovery.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5711) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5712) cmd->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5714) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5715) * Call alternate submit routine for I/O accelerated commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5716) * Retries always go down the normal I/O path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5717) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5718) if (likely(cmd->retries == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5719) !blk_rq_is_passthrough(cmd->request) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5720) h->acciopath_status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5721) rc = hpsa_ioaccel_submit(h, c, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5722) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5723) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5724) if (rc == SCSI_MLQUEUE_HOST_BUSY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5725) hpsa_cmd_resolve_and_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5726) return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5728) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5729) return hpsa_ciss_submit(h, c, cmd, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5732) static void hpsa_scan_complete(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5734) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5736) spin_lock_irqsave(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5737) h->scan_finished = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5738) wake_up(&h->scan_wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5739) spin_unlock_irqrestore(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5740) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5742) static void hpsa_scan_start(struct Scsi_Host *sh)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5743) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5744) struct ctlr_info *h = shost_to_hba(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5745) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5747) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5748) * Don't let rescans be initiated on a controller known to be locked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5749) * up. If the controller locks up *during* a rescan, that thread is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5750) * probably hosed, but at least we can prevent new rescan threads from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5751) * piling up on a locked up controller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5752) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5753) if (unlikely(lockup_detected(h)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5754) return hpsa_scan_complete(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5755)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5756) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5757) * If a scan is already waiting to run, no need to add another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5758) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5759) spin_lock_irqsave(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5760) if (h->scan_waiting) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5761) spin_unlock_irqrestore(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5762) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5765) spin_unlock_irqrestore(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5767) /* wait until any scan already in progress is finished. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5768) while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5769) spin_lock_irqsave(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5770) if (h->scan_finished)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5771) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5772) h->scan_waiting = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5773) spin_unlock_irqrestore(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5774) wait_event(h->scan_wait_queue, h->scan_finished);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5775) /* Note: We don't need to worry about a race between this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5776) * thread and driver unload because the midlayer will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5777) * have incremented the reference count, so unload won't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5778) * happen if we're in here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5780) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5781) h->scan_finished = 0; /* mark scan as in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5782) h->scan_waiting = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5783) spin_unlock_irqrestore(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5785) if (unlikely(lockup_detected(h)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5786) return hpsa_scan_complete(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5788) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5789) * Don't scan while a reset is in progress; request a rescan once it completes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5790) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5791) spin_lock_irqsave(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5792) if (h->reset_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5793) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5794) spin_unlock_irqrestore(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5795) hpsa_scan_complete(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5796) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5798) spin_unlock_irqrestore(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5800) hpsa_update_scsi_devices(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5802) hpsa_scan_complete(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5803) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5805) static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5807) struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5809) if (!logical_drive)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5810) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5812) if (qdepth < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5813) qdepth = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5814) else if (qdepth > logical_drive->queue_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5815) qdepth = logical_drive->queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5817) return scsi_change_queue_depth(sdev, qdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5818) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5820) static int hpsa_scan_finished(struct Scsi_Host *sh,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5821) unsigned long elapsed_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5823) struct ctlr_info *h = shost_to_hba(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5824) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5825) int finished;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5826)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5827) spin_lock_irqsave(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5828) finished = h->scan_finished;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5829) spin_unlock_irqrestore(&h->scan_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5830) return finished;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5833) static int hpsa_scsi_host_alloc(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5835) struct Scsi_Host *sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5836)
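	/* Private hostdata only needs to hold a pointer to our ctlr_info, hence sizeof(h) */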
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5837) sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5838) if (sh == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5839) dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5840) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5841) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5843) sh->io_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5844) sh->n_io_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5845) sh->this_id = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5846) sh->max_channel = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5847) sh->max_cmd_len = MAX_COMMAND_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5848) sh->max_lun = HPSA_MAX_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5849) sh->max_id = HPSA_MAX_LUN;
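	/* Leave HPSA_NRESERVED_CMDS command slots for driver-internal commands */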
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5850) sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5851) sh->cmd_per_lun = sh->can_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5852) sh->sg_tablesize = h->maxsgentries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5853) sh->transportt = hpsa_sas_transport_template;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5854) sh->hostdata[0] = (unsigned long) h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5855) sh->irq = pci_irq_vector(h->pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5856) sh->unique_id = sh->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5858) h->scsi_host = sh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5859) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5862) static int hpsa_scsi_add_host(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5864) int rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5866) rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5867) if (rv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5868) dev_err(&h->pdev->dev, "scsi_add_host failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5869) return rv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5871) scsi_scan_host(h->scsi_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5872) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5874)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5875) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5876) * The block layer has already gone to the trouble of picking out a unique,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5877) * small-integer tag for this request. We use an offset from that value as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5878) * an index to select our command block. (The offset allows us to reserve the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5879) * low-numbered entries for our own uses.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5880) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5881) static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5883) int idx = scmd->request->tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5885) if (idx < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5886) return idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5888) /* Offset to leave space for internal cmds. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5889) return idx + HPSA_NRESERVED_CMDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5890) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5891)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5892) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5893) * Send a TEST_UNIT_READY command to the specified LUN using the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5894) * reply queue; returns zero if the unit is ready, and non-zero otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5896) static int hpsa_send_test_unit_ready(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5897) struct CommandList *c, unsigned char lunaddr[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5898) int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5899) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5900) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5901)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5902) /* Send the Test Unit Ready; fill_cmd can't fail, no data buffer to map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5903) (void) fill_cmd(c, TEST_UNIT_READY, h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5904) NULL, 0, 0, lunaddr, TYPE_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5905) rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5906) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5907) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5908) /* no unmap needed here because no data xfer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5910) /* Check if the unit is already ready. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5911) if (c->err_info->CommandStatus == CMD_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5912) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5915) * The first command sent after reset will receive "unit attention" to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5916) * indicate that the LUN has been reset...this is actually what we're
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5917) * looking for (but, success is good too).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5919) if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5920) c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5921) (c->err_info->SenseInfo[2] == NO_SENSE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5922) c->err_info->SenseInfo[2] == UNIT_ATTENTION))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5923) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5925) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5928) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5929) * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5930) * returns zero when the unit is ready, and non-zero when giving up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5931) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5932) static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5933) struct CommandList *c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5934) unsigned char lunaddr[], int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5935) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5936) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5937) int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5938) int waittime = 1; /* seconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5940) /* Send test unit ready until device ready, or give up. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5941) for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5943) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5944) * Wait for a bit. Do this first, because if we send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5945) * the TUR right away, the reset will just abort it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5947) msleep(1000 * waittime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5949) rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5950) if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5951) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5952)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5953) /* Increase wait time with each try, up to a point. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5954) if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5955) waittime *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5956)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5957) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5958) "waiting %d secs for device to become ready.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5959) waittime);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5962) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5965) static int wait_for_device_to_become_ready(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5966) unsigned char lunaddr[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5967) int reply_queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5969) int first_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5970) int last_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5971) int rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5972) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5973) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5975) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5977) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5978) * If no specific reply queue was requested, then send the TUR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5979) * repeatedly, requesting a reply on each reply queue; otherwise execute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5980) * the loop exactly once using only the specified queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5981) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5982) if (reply_queue == DEFAULT_REPLY_QUEUE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5983) first_queue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5984) last_queue = h->nreply_queues - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5985) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5986) first_queue = reply_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5987) last_queue = reply_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5989)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5990) for (rq = first_queue; rq <= last_queue; rq++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5991) rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5992) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5993) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5996) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5997) dev_warn(&h->pdev->dev, "giving up on device.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5998) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5999) dev_warn(&h->pdev->dev, "device is ready.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6001) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6002) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6005) /* Need at least one of these error handlers to keep ../scsi/hosts.c from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6006) * complaining. Doing a host- or bus-reset can't do anything good here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6007) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6008) static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6010) int rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6011) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6012) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6013) struct hpsa_scsi_dev_t *dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6014) u8 reset_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6015) char msg[48];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6016) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6018) /* find the controller to which the command to be reset was sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6019) h = sdev_to_hba(scsicmd->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6020) if (h == NULL) /* paranoia */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6021) return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6023) spin_lock_irqsave(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6024) h->reset_in_progress = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6025) spin_unlock_irqrestore(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6027) if (lockup_detected(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6028) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6029) goto return_reset_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6031)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6032) dev = scsicmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6033) if (!dev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6034) dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6035) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6036) goto return_reset_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6039) if (dev->devtype == TYPE_ENCLOSURE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6040) rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6041) goto return_reset_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6044) /* if controller locked up, we can guarantee command won't complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6045) if (lockup_detected(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6046) snprintf(msg, sizeof(msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6047) "cmd %d RESET FAILED, lockup detected",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6048) hpsa_get_cmd_index(scsicmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6049) hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6050) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6051) goto return_reset_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6054) /* this reset request might be the result of a lockup; check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6055) if (detect_controller_lockup(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6056) snprintf(msg, sizeof(msg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6057) "cmd %d RESET FAILED, new lockup detected",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6058) hpsa_get_cmd_index(scsicmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6059) hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6060) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6061) goto return_reset_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6062) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6063)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6064) /* Do not attempt a reset on the controller device itself */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6065) if (is_hba_lunid(dev->scsi3addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6066) rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6067) goto return_reset_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6069)
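	/* Logical volumes get a LUN reset message; physical devices get a target reset */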
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6070) if (is_logical_dev_addr_mode(dev->scsi3addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6071) reset_type = HPSA_DEVICE_RESET_MSG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6072) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6073) reset_type = HPSA_PHYS_TARGET_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6075) sprintf(msg, "resetting %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6076) reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6077) hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6078)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6079) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6080) * wait to see if any commands will complete before sending reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6081) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6082) dev->in_reset = true; /* block any new cmds from OS for this device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6083) for (i = 0; i < 10; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6084) if (atomic_read(&dev->commands_outstanding) > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6085) msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6086) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6087) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6088) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6089)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6090) /* send a reset to the SCSI LUN which the command was sent to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6091) rc = hpsa_do_reset(h, dev, reset_type, DEFAULT_REPLY_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6092) if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6093) rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6094) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6095) rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6097) sprintf(msg, "reset %s %s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6098) reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6099) rc == SUCCESS ? "completed successfully" : "failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6100) hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6101)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6102) return_reset_status:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6103) spin_lock_irqsave(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6104) h->reset_in_progress = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6105) if (dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6106) dev->in_reset = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6107) spin_unlock_irqrestore(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6108) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6111) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6112) * For operations with an associated SCSI command, a command block is allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6113) * at init and looked up by cmd_tagged_alloc() using the block request tag as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6114) * an index into a table of entries. cmd_tagged_free() is the complement,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6115) * although cmd_free() may be called instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6116) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6117) static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6118) struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6120) int idx = hpsa_get_cmd_index(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6121) struct CommandList *c = h->cmd_pool + idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6123) if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6124) dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6125) idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6126) /* The index value comes from the block layer, so if it's out of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6127) * bounds, it's probably not our bug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6128) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6129) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6132) if (unlikely(!hpsa_is_cmd_idle(c))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6133) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6134) * We expect that the SCSI layer will hand us a unique tag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6135) * value. Thus, there should never be a collision here between
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6136) * two requests...because if the selected command isn't idle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6137) * then someone is going to be very disappointed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6138) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6139) if (idx != h->last_collision_tag) { /* Print once per tag */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6140) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6141) "%s: tag collision (tag=%d)\n", __func__, idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6142) if (scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6143) scsi_print_command(scmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6144) h->last_collision_tag = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6146) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6148)
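	/* Take our reference on the command; cmd_tagged_free() drops it */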
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6149) atomic_inc(&c->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6150)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6151) hpsa_cmd_partial_init(h, idx, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6152) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6155) static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6156) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6158) * Release our reference to the block. We don't need to do anything
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6159) * else to free it, because it is accessed by index.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6160) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6161) (void)atomic_dec(&c->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6163)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6164) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6165) * For operations that cannot sleep, a command block is allocated at init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6166) * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6167) * which ones are free or in use. Lock must be held when calling this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6168) * cmd_free() is the complement.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6169) * This function never gives up and never returns NULL. If it hangs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6170) * another thread must call cmd_free() to free some tags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6173) static struct CommandList *cmd_alloc(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6174) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6175) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6176) int refcount, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6177) int offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6180) * There is some *extremely* small but non-zero chance that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6181) * multiple threads could get in here, and one thread could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6182) * be scanning through the list of bits looking for a free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6183) * one, but the free ones are always behind him, and other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6184) * threads sneak in behind him and eat them before he can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6185) * get to them, so that while there is always a free one, a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6186) * very unlucky thread might be starved anyway, never able to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6187) * beat the other threads. In reality, this happens so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6188) * infrequently as to be indistinguishable from never.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6189) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6190) * Note that we start allocating commands before the SCSI host structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6191) * is initialized. Since the search starts at bit zero, this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6192) * all works, since we have at least one command structure available;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6193) * however, it means that the structures with the low indexes have to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6194) * reserved for driver-initiated requests, while requests from the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6195) * layer will use the higher indexes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6197)
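	/*
	 * Search only the low HPSA_NRESERVED_CMDS slots; tags above that
	 * range belong to the block layer (see cmd_tagged_alloc()).
	 */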
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6198) for (;;) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6199) i = find_next_zero_bit(h->cmd_pool_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6200) HPSA_NRESERVED_CMDS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6201) offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6202) if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6203) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6204) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6206) c = h->cmd_pool + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6207) refcount = atomic_inc_return(&c->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6208) if (unlikely(refcount > 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6209) cmd_free(h, c); /* already in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6210) offset = (i + 1) % HPSA_NRESERVED_CMDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6211) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6212) }
		set_bit(i, h->cmd_pool_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6215) break; /* it's ours now. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6216) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6217) hpsa_cmd_partial_init(h, i, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6218) c->device = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6219) return c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6220) }
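
/*
 * Illustrative sketch (not wired into the driver): the claim protocol
 * used by cmd_alloc() above, reduced to a toy pool.  All names here
 * (toy_*, TOY_POOL_SIZE) are hypothetical and exist only for this
 * example.  The point is that a zero bit is merely a hint; the atomic
 * refcount is what actually arbitrates between racing threads.
 */
#define TOY_POOL_SIZE 16
static unsigned long toy_bits[BITS_TO_LONGS(TOY_POOL_SIZE)];
static atomic_t toy_refcount[TOY_POOL_SIZE];

static int __maybe_unused toy_claim(void)
{
	int i, offset = 0;

	for (;;) {
		i = find_next_zero_bit(toy_bits, TOY_POOL_SIZE, offset);
		if (i >= TOY_POOL_SIZE) {
			offset = 0;	/* wrap around and rescan; never give up */
			continue;
		}
		/* The increment, not the bitmap bit, is the real lock. */
		if (atomic_inc_return(&toy_refcount[i]) > 1) {
			atomic_dec(&toy_refcount[i]);	/* lost the race */
			offset = (i + 1) % TOY_POOL_SIZE;
			continue;
		}
		set_bit(i, toy_bits);	/* ours now; the bit is bookkeeping */
		return i;
	}
}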
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6222) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6223) * This is the complementary operation to cmd_alloc(). Note, however, in some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6224) * corner cases it may also be used to free blocks allocated by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6225) * cmd_tagged_alloc() in which case the ref-count decrement does the trick and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6226) * the clear-bit is harmless.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6227) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6228) static void cmd_free(struct ctlr_info *h, struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6230) if (atomic_dec_and_test(&c->refcount)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6231) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6233) i = c - h->cmd_pool;
		clear_bit(i, h->cmd_pool_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6236) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6237) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6239) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6240)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6241) static int hpsa_ioctl32_passthru(struct scsi_device *dev, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6242) void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6244) struct ctlr_info *h = sdev_to_hba(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6245) IOCTL32_Command_struct __user *arg32 = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6246) IOCTL_Command_struct arg64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6247) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6248) u32 cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6250) if (!arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6251) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6253) memset(&arg64, 0, sizeof(arg64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6254) if (copy_from_user(&arg64, arg32, offsetof(IOCTL_Command_struct, buf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6255) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6256) if (get_user(cp, &arg32->buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6257) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6258) arg64.buf = compat_ptr(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6260) if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6261) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6262) err = hpsa_passthru_ioctl(h, &arg64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6263) atomic_inc(&h->passthru_cmds_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6264) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6265) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6266) if (copy_to_user(&arg32->error_info, &arg64.error_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6267) sizeof(arg32->error_info)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6268) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6269) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6270) }
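
/*
 * Illustrative sketch of the compat thunk pattern used above, with
 * hypothetical structures (toy_req32/toy_req64): the fields up to the
 * user-buffer pointer are layout-compatible between the 32-bit and
 * native forms, so a single copy_from_user() covers them, and only the
 * 32-bit pointer needs widening via compat_ptr().
 */
struct toy_req32 {
	u32 len;
	u32 buf;		/* 32-bit user pointer */
};

struct toy_req64 {
	u32 len;
	void __user *buf;	/* native user pointer */
};

static int __maybe_unused toy_compat_fixup(struct toy_req64 *dst,
					struct toy_req32 __user *src)
{
	u32 uptr;

	if (copy_from_user(dst, src, offsetof(struct toy_req32, buf)))
		return -EFAULT;
	if (get_user(uptr, &src->buf))
		return -EFAULT;
	dst->buf = compat_ptr(uptr);	/* widen the 32-bit token */
	return 0;
}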
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6272) static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6273) unsigned int cmd, void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6275) struct ctlr_info *h = sdev_to_hba(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6276) BIG_IOCTL32_Command_struct __user *arg32 = arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6277) BIG_IOCTL_Command_struct arg64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6278) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6279) u32 cp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6281) if (!arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6282) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6283) memset(&arg64, 0, sizeof(arg64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6284) if (copy_from_user(&arg64, arg32,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6285) offsetof(BIG_IOCTL32_Command_struct, buf)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6286) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6287) if (get_user(cp, &arg32->buf))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6288) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6289) arg64.buf = compat_ptr(cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6291) if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6292) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6293) err = hpsa_big_passthru_ioctl(h, &arg64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6294) atomic_inc(&h->passthru_cmds_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6295) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6296) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6297) if (copy_to_user(&arg32->error_info, &arg64.error_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6298) sizeof(arg32->error_info)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6299) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6300) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6301) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6303) static int hpsa_compat_ioctl(struct scsi_device *dev, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6304) void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6305) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6306) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6307) case CCISS_GETPCIINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6308) case CCISS_GETINTINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6309) case CCISS_SETINTINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6310) case CCISS_GETNODENAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6311) case CCISS_SETNODENAME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6312) case CCISS_GETHEARTBEAT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6313) case CCISS_GETBUSTYPES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6314) case CCISS_GETFIRMVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6315) case CCISS_GETDRIVVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6316) case CCISS_REVALIDVOLS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6317) case CCISS_DEREGDISK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6318) case CCISS_REGNEWDISK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6319) case CCISS_REGNEWD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6320) case CCISS_RESCANDISK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6321) case CCISS_GETLUNINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6322) return hpsa_ioctl(dev, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6324) case CCISS_PASSTHRU32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6325) return hpsa_ioctl32_passthru(dev, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6326) case CCISS_BIG_PASSTHRU32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6327) return hpsa_ioctl32_big_passthru(dev, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6329) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6330) return -ENOIOCTLCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6331) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6332) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6333) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6335) static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6337) struct hpsa_pci_info pciinfo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6339) if (!argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6340) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6341) pciinfo.domain = pci_domain_nr(h->pdev->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6342) pciinfo.bus = h->pdev->bus->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6343) pciinfo.dev_fn = h->pdev->devfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6344) pciinfo.board_id = h->board_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6345) if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6346) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6347) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6349)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6350) static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6352) DriverVer_type DriverVer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6353) unsigned char vmaj, vmin, vsubmin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6354) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6355)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6356) rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6357) &vmaj, &vmin, &vsubmin);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6358) if (rc != 3) {
		dev_info(&h->pdev->dev,
			"driver version string '%s' unrecognized.\n",
			HPSA_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6361) vmaj = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6362) vmin = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6363) vsubmin = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6364) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6365) DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6366) if (!argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6367) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6368) if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6369) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6370) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6371) }
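
/*
 * Illustrative sketch: DriverVer above packs major/minor/subminor into
 * one byte each of a 32-bit word; unpacking is the mirror image.  The
 * helper is hypothetical and only demonstrates the layout, e.g.
 * "3.4.20" packs to 0x030414.
 */
static void __maybe_unused toy_unpack_driver_ver(u32 ver, u8 *maj, u8 *min,
						u8 *sub)
{
	*maj = (ver >> 16) & 0xFF;	/* 0x030414 -> 3 */
	*min = (ver >> 8) & 0xFF;	/* 0x030414 -> 4 */
	*sub = ver & 0xFF;		/* 0x030414 -> 0x14 (20) */
}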
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6373) static int hpsa_passthru_ioctl(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6374) IOCTL_Command_struct *iocommand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6375) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6376) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6377) char *buff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6378) u64 temp64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6379) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6381) if (!capable(CAP_SYS_RAWIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6382) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6383) if ((iocommand->buf_size < 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6384) (iocommand->Request.Type.Direction != XFER_NONE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6385) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6386) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6387) if (iocommand->buf_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6388) buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6389) if (buff == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6390) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6391) if (iocommand->Request.Type.Direction & XFER_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6392) /* Copy the data into the buffer we created */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6393) if (copy_from_user(buff, iocommand->buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6394) iocommand->buf_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6395) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6396) goto out_kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6398) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6399) memset(buff, 0, iocommand->buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6402) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6404) /* Fill in the command type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6405) c->cmd_type = CMD_IOCTL_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6406) c->scsi_cmd = SCSI_CMD_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6407) /* Fill in Command Header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6408) c->Header.ReplyQueue = 0; /* unused in simple mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6409) if (iocommand->buf_size > 0) { /* buffer to fill */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6410) c->Header.SGList = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6411) c->Header.SGTotal = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6412) } else { /* no buffers to fill */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6413) c->Header.SGList = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6414) c->Header.SGTotal = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6415) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6416) memcpy(&c->Header.LUN, &iocommand->LUN_info, sizeof(c->Header.LUN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6418) /* Fill in Request block */
	memcpy(&c->Request, &iocommand->Request, sizeof(c->Request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6422) /* Fill in the scatter gather information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6423) if (iocommand->buf_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6424) temp64 = dma_map_single(&h->pdev->dev, buff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6425) iocommand->buf_size, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6426) if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6427) c->SG[0].Addr = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6428) c->SG[0].Len = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6429) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6430) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6432) c->SG[0].Addr = cpu_to_le64(temp64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6433) c->SG[0].Len = cpu_to_le32(iocommand->buf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6434) c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6436) rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6437) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6438) if (iocommand->buf_size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6439) hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6440) check_ioctl_unit_attention(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6441) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6442) rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6443) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6446) /* Copy the error information out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6447) memcpy(&iocommand->error_info, c->err_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6448) sizeof(iocommand->error_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6449) if ((iocommand->Request.Type.Direction & XFER_READ) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6450) iocommand->buf_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6451) /* Copy the data out of the buffer we created */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6452) if (copy_to_user(iocommand->buf, buff, iocommand->buf_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6453) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6454) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6455) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6457) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6458) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6459) out_kfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6460) kfree(buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6461) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6462) }
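
/*
 * Illustrative sketch of the map -> check -> use -> unmap discipline
 * the passthru path above follows for its single scatter-gather
 * element.  The device submission step is elided and the helper name
 * is hypothetical.
 */
static int __maybe_unused toy_map_one_buffer(struct pci_dev *pdev, void *buf,
					size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(&pdev->dev, buf, len, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;	/* never hand a bad handle to hardware */

	/* ... tell the device about (handle, len) and wait for it ... */

	dma_unmap_single(&pdev->dev, handle, len, DMA_BIDIRECTIONAL);
	return 0;
}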
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6464) static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6465) BIG_IOCTL_Command_struct *ioc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6466) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6467) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6468) unsigned char **buff = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6469) int *buff_size = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6470) u64 temp64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6471) BYTE sg_used = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6472) int status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6473) u32 left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6474) u32 sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6475) BYTE __user *data_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6477) if (!capable(CAP_SYS_RAWIO))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6478) return -EPERM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6480) if ((ioc->buf_size < 1) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6481) (ioc->Request.Type.Direction != XFER_NONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6482) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6483) /* Check kmalloc limits using all SGs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6484) if (ioc->malloc_size > MAX_KMALLOC_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6485) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6486) if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6487) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6488) buff = kcalloc(SG_ENTRIES_IN_CMD, sizeof(char *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6489) if (!buff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6490) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6491) goto cleanup1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6493) buff_size = kmalloc_array(SG_ENTRIES_IN_CMD, sizeof(int), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6494) if (!buff_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6495) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6496) goto cleanup1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6497) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6498) left = ioc->buf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6499) data_ptr = ioc->buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6500) while (left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6501) sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6502) buff_size[sg_used] = sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6503) buff[sg_used] = kmalloc(sz, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6504) if (buff[sg_used] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6505) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6506) goto cleanup1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6507) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6508) if (ioc->Request.Type.Direction & XFER_WRITE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6509) if (copy_from_user(buff[sg_used], data_ptr, sz)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6510) status = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6511) goto cleanup1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6512) }
		} else {
			memset(buff[sg_used], 0, sz);
		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6515) left -= sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6516) data_ptr += sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6517) sg_used++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6519) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6521) c->cmd_type = CMD_IOCTL_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6522) c->scsi_cmd = SCSI_CMD_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6523) c->Header.ReplyQueue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6524) c->Header.SGList = (u8) sg_used;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6525) c->Header.SGTotal = cpu_to_le16(sg_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6526) memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6527) memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6528) if (ioc->buf_size > 0) {
		int i;

		for (i = 0; i < sg_used; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6531) temp64 = dma_map_single(&h->pdev->dev, buff[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6532) buff_size[i], DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6533) if (dma_mapping_error(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6534) (dma_addr_t) temp64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6535) c->SG[i].Addr = cpu_to_le64(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6536) c->SG[i].Len = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6537) hpsa_pci_unmap(h->pdev, c, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6538) DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6539) status = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6540) goto cleanup0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6542) c->SG[i].Addr = cpu_to_le64(temp64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6543) c->SG[i].Len = cpu_to_le32(buff_size[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6544) c->SG[i].Ext = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6546) c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6548) status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6549) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6550) if (sg_used)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6551) hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6552) check_ioctl_unit_attention(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6553) if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6554) status = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6555) goto cleanup0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6557)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6558) /* Copy the error information out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6559) memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6560) if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6561) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6563) /* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;

		for (i = 0; i < sg_used; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6566) if (copy_to_user(ptr, buff[i], buff_size[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6567) status = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6568) goto cleanup0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6569) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6570) ptr += buff_size[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6573) status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6574) cleanup0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6575) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6576) cleanup1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6577) if (buff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6578) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6579)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6580) for (i = 0; i < sg_used; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6581) kfree(buff[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6582) kfree(buff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6584) kfree(buff_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6585) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6586) }
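
/*
 * Illustrative sketch of the chunking loop above: a buf_size-byte
 * transfer is split into pieces of at most malloc_size bytes, one
 * scatter-gather entry per piece, and the earlier buf_size check
 * guarantees the result fits in SG_ENTRIES_IN_CMD entries.  This toy
 * (hypothetical) version only counts the chunks.
 */
static int __maybe_unused toy_count_chunks(u32 buf_size, u32 chunk_size)
{
	u32 left = buf_size;
	int chunks = 0;

	while (left) {
		u32 sz = min(left, chunk_size);

		left -= sz;
		chunks++;	/* one SG entry per chunk */
	}
	return chunks;
}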
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6587)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6588) static void check_ioctl_unit_attention(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6589) struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6591) if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6592) c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6593) (void) check_for_unit_attention(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6596) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6597) * ioctl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6598) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6599) static int hpsa_ioctl(struct scsi_device *dev, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6600) void __user *argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6602) struct ctlr_info *h = sdev_to_hba(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6603) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6605) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6606) case CCISS_DEREGDISK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6607) case CCISS_REGNEWDISK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6608) case CCISS_REGNEWD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6609) hpsa_scan_start(h->scsi_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6610) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6611) case CCISS_GETPCIINFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6612) return hpsa_getpciinfo_ioctl(h, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6613) case CCISS_GETDRIVVER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6614) return hpsa_getdrivver_ioctl(h, argp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6615) case CCISS_PASSTHRU: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6616) IOCTL_Command_struct iocommand;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6617)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6618) if (!argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6619) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6620) if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6621) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6622) if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6623) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6624) rc = hpsa_passthru_ioctl(h, &iocommand);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6625) atomic_inc(&h->passthru_cmds_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6626) if (!rc && copy_to_user(argp, &iocommand, sizeof(iocommand)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6627) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6628) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6630) case CCISS_BIG_PASSTHRU: {
		BIG_IOCTL_Command_struct ioc;

		if (!argp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6633) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6634) if (copy_from_user(&ioc, argp, sizeof(ioc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6635) return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6636) if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6637) return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6638) rc = hpsa_big_passthru_ioctl(h, &ioc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6639) atomic_inc(&h->passthru_cmds_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6640) if (!rc && copy_to_user(argp, &ioc, sizeof(ioc)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6641) rc = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6642) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6644) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6645) return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6647) }
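
/*
 * Illustrative sketch of the passthru throttle used in the cases
 * above: atomic_dec_if_positive() either takes a slot or leaves the
 * counter untouched, so a failed attempt needs no undo.  The helper
 * is hypothetical.
 */
static int __maybe_unused toy_throttled_op(atomic_t *slots_avail)
{
	if (atomic_dec_if_positive(slots_avail) < 0)
		return -EAGAIN;		/* all slots busy; caller may retry */

	/* ... do the rate-limited work while holding the slot ... */

	atomic_inc(slots_avail);	/* put the slot back */
	return 0;
}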
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6649) static void hpsa_send_host_reset(struct ctlr_info *h, u8 reset_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6650) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6651) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6653) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6655) /* fill_cmd can't fail here, no data buffer to map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6656) (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6657) RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6659) c->waiting = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6660) enqueue_cmd_and_start_io(h, c);
	/*
	 * Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6668) static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6669) void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6670) int cmd_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6671) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6672) enum dma_data_direction dir = DMA_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6673)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6674) c->cmd_type = CMD_IOCTL_PEND;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6675) c->scsi_cmd = SCSI_CMD_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6676) c->Header.ReplyQueue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6677) if (buff != NULL && size > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6678) c->Header.SGList = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6679) c->Header.SGTotal = cpu_to_le16(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6680) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6681) c->Header.SGList = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6682) c->Header.SGTotal = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6684) memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6686) if (cmd_type == TYPE_CMD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6687) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6688) case HPSA_INQUIRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6689) /* are we trying to read a vital product page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6690) if (page_code & VPD_PAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6691) c->Request.CDB[1] = 0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6692) c->Request.CDB[2] = (page_code & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6693) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6694) c->Request.CDBLen = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6695) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6696) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6697) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6698) c->Request.CDB[0] = HPSA_INQUIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6699) c->Request.CDB[4] = size & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6700) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6701) case RECEIVE_DIAGNOSTIC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6702) c->Request.CDBLen = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6703) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6704) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6705) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6706) c->Request.CDB[0] = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6707) c->Request.CDB[1] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6708) c->Request.CDB[2] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6709) c->Request.CDB[3] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6710) c->Request.CDB[4] = size & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6711) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6712) case HPSA_REPORT_LOG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6713) case HPSA_REPORT_PHYS:
			/*
			 * Talking to the controller, so it's a physical
			 * command: mode = 00, target = 0.  Nothing to write.
			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6717) c->Request.CDBLen = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6718) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6719) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6720) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6721) c->Request.CDB[0] = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6722) c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6723) c->Request.CDB[7] = (size >> 16) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6724) c->Request.CDB[8] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6725) c->Request.CDB[9] = size & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6726) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6727) case BMIC_SENSE_DIAG_OPTIONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6728) c->Request.CDBLen = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6729) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6730) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6731) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6732) /* Spec says this should be BMIC_WRITE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6733) c->Request.CDB[0] = BMIC_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6734) c->Request.CDB[6] = BMIC_SENSE_DIAG_OPTIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6735) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6736) case BMIC_SET_DIAG_OPTIONS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6737) c->Request.CDBLen = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6738) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6739) TYPE_ATTR_DIR(cmd_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6740) ATTR_SIMPLE, XFER_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6741) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6742) c->Request.CDB[0] = BMIC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6743) c->Request.CDB[6] = BMIC_SET_DIAG_OPTIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6744) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6745) case HPSA_CACHE_FLUSH:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6746) c->Request.CDBLen = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6747) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6748) TYPE_ATTR_DIR(cmd_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6749) ATTR_SIMPLE, XFER_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6750) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6751) c->Request.CDB[0] = BMIC_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6752) c->Request.CDB[6] = BMIC_CACHE_FLUSH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6753) c->Request.CDB[7] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6754) c->Request.CDB[8] = size & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6755) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6756) case TEST_UNIT_READY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6757) c->Request.CDBLen = 6;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6758) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6759) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6760) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6761) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6762) case HPSA_GET_RAID_MAP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6763) c->Request.CDBLen = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6764) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6765) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6766) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6767) c->Request.CDB[0] = HPSA_CISS_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6768) c->Request.CDB[1] = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6769) c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6770) c->Request.CDB[7] = (size >> 16) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6771) c->Request.CDB[8] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6772) c->Request.CDB[9] = size & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6773) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6774) case BMIC_SENSE_CONTROLLER_PARAMETERS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6775) c->Request.CDBLen = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6776) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6777) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6778) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6779) c->Request.CDB[0] = BMIC_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6780) c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6781) c->Request.CDB[7] = (size >> 16) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6782) c->Request.CDB[8] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6783) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6784) case BMIC_IDENTIFY_PHYSICAL_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6785) c->Request.CDBLen = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6786) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6787) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6788) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6789) c->Request.CDB[0] = BMIC_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6790) c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6791) c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6793) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6794) case BMIC_SENSE_SUBSYSTEM_INFORMATION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6795) c->Request.CDBLen = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6796) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6797) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6798) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6799) c->Request.CDB[0] = BMIC_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6800) c->Request.CDB[6] = BMIC_SENSE_SUBSYSTEM_INFORMATION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6801) c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6803) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6804) case BMIC_SENSE_STORAGE_BOX_PARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6805) c->Request.CDBLen = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6806) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6807) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6808) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6809) c->Request.CDB[0] = BMIC_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6810) c->Request.CDB[6] = BMIC_SENSE_STORAGE_BOX_PARAMS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6811) c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6813) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6814) case BMIC_IDENTIFY_CONTROLLER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6815) c->Request.CDBLen = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6816) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6817) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6818) c->Request.Timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6819) c->Request.CDB[0] = BMIC_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6820) c->Request.CDB[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6821) c->Request.CDB[2] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6822) c->Request.CDB[3] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6823) c->Request.CDB[4] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6824) c->Request.CDB[5] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6825) c->Request.CDB[6] = BMIC_IDENTIFY_CONTROLLER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6826) c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6828) c->Request.CDB[9] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6829) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6830) default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6832) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6834) } else if (cmd_type == TYPE_MSG) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6835) switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6837) case HPSA_PHYS_TARGET_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6838) c->Request.CDBLen = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6839) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6840) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6841) c->Request.Timeout = 0; /* Don't time out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6842) memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6843) c->Request.CDB[0] = HPSA_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6844) c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6845) /* Physical target reset needs no control bytes 4-7*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6846) c->Request.CDB[4] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6847) c->Request.CDB[5] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6848) c->Request.CDB[6] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6849) c->Request.CDB[7] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6850) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6851) case HPSA_DEVICE_RESET_MSG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6852) c->Request.CDBLen = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6853) c->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6854) TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6855) c->Request.Timeout = 0; /* Don't time out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6856) memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6857) c->Request.CDB[0] = cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6858) c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6859) /* If bytes 4-7 are zero, it means reset the */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6860) /* LunID device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6861) c->Request.CDB[4] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6862) c->Request.CDB[5] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6863) c->Request.CDB[6] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6864) c->Request.CDB[7] = 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6865) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6866) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6867) dev_warn(&h->pdev->dev, "unknown message type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6868) cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6869) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6871) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6872) dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6873) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6876) switch (GET_DIR(c->Request.type_attr_dir)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6877) case XFER_READ:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6878) dir = DMA_FROM_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6879) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6880) case XFER_WRITE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6881) dir = DMA_TO_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6882) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6883) case XFER_NONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6884) dir = DMA_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6885) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6886) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6887) dir = DMA_BIDIRECTIONAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6889) if (hpsa_map_one(h->pdev, c, buff, size, dir))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6890) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6891) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6892) }
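
/*
 * Illustrative sketch of a typical fill_cmd() caller, following the
 * alloc/fill/fire/free pattern used throughout this file.  This toy
 * helper (hypothetical, not wired up anywhere) issues TEST_UNIT_READY,
 * which needs no data buffer, so fill_cmd() cannot fail here.
 */
static int __maybe_unused toy_test_unit_ready(struct ctlr_info *h,
					unsigned char *scsi3addr)
{
	struct CommandList *c;
	int rc;

	c = cmd_alloc(h);
	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr,
			TYPE_CMD);
	rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
	cmd_free(h, c);
	return rc;
}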
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6894) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6895) * Map (physical) PCI mem into (virtual) kernel space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6896) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6897) static void __iomem *remap_pci_mem(ulong base, ulong size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6898) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6899) ulong page_base = ((ulong) base) & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6900) ulong page_offs = ((ulong) base) - page_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6901) void __iomem *page_remapped = ioremap(page_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6902) page_offs + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6903)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6904) return page_remapped ? (page_remapped + page_offs) : NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6905) }
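
/*
 * Illustrative sketch of the arithmetic above, assuming 4K pages:
 * base 0x12345678 yields page_base 0x12345000 and page_offs 0x678, so
 * ioremap() starts at the page boundary and the caller's pointer is
 * bumped forward again by the offset.  The helper is hypothetical.
 */
static void __maybe_unused toy_split_base(ulong base, ulong *page_base,
					ulong *page_offs)
{
	*page_base = base & PAGE_MASK;	/* round down to a page boundary */
	*page_offs = base - *page_base;	/* remainder within that page */
}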
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6907) static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6909) return h->access.command_completed(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6911)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6912) static inline bool interrupt_pending(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6914) return h->access.intr_pending(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6917) static inline long interrupt_not_for_us(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6919) return (h->access.intr_pending(h) == 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6920) (h->interrupts_enabled == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6923) static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6924) u32 raw_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6926) if (unlikely(tag_index >= h->nr_cmds)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6927) dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6928) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6929) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6930) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6933) static inline void finish_cmd(struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6935) dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6936) if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6937) || c->cmd_type == CMD_IOACCEL2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6938) complete_scsi_command(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6939) else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6940) complete(c->waiting);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6941) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6942)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6943) /* process completion of an indexed ("direct lookup") command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6944) static inline void process_indexed_cmd(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6945) u32 raw_tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6947) u32 tag_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6948) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6950) tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6951) if (!bad_tag(h, tag_index, raw_tag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6952) c = h->cmd_pool + tag_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6953) finish_cmd(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6955) }
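
/*
 * Illustrative sketch: a completed tag carries the command-pool index
 * in its upper bits (the low DIRECT_LOOKUP_SHIFT bits serve other
 * purposes and are discarded here), so lookup is pure pointer
 * arithmetic on cmd_pool, with bad_tag() as the bounds check.  The
 * helper is hypothetical.
 */
static struct CommandList * __maybe_unused toy_tag_to_cmd(struct ctlr_info *h,
					u32 raw_tag)
{
	u32 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;

	if (bad_tag(h, tag_index, raw_tag))
		return NULL;			/* out of range; drop it */
	return h->cmd_pool + tag_index;		/* O(1) direct lookup */
}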
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6956)
/*
 * Some controllers, like the P400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6962) static int ignore_bogus_interrupt(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6964) if (likely(!reset_devices))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6965) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6967) if (likely(h->interrupts_enabled))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6968) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6969)
	dev_info(&h->pdev->dev,
		 "Received interrupt while interrupts disabled (known firmware bug); ignoring.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6973) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6975)
/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6981) static struct ctlr_info *queue_to_hba(u8 *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6982) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6983) return container_of((queue - *queue), struct ctlr_info, q[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6984) }
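
/*
 * Illustrative sketch of the pointer trick above with a hypothetical
 * structure: because init code stores q[x] = x, subtracting *queue
 * from queue always lands on &hba->q[0], and container_of() then
 * recovers the enclosing structure without any lookup table.
 */
struct toy_hba {
	int id;
	u8 q[4];	/* toy init code must store q[x] = x */
};

static struct toy_hba * __maybe_unused toy_queue_to_hba(u8 *queue)
{
	/* queue == &hba->q[*queue], so queue - *queue == &hba->q[0] */
	return container_of(queue - *queue, struct toy_hba, q[0]);
}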
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6986) static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6987) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6988) struct ctlr_info *h = queue_to_hba(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6989) u8 q = *(u8 *) queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6990) u32 raw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6992) if (ignore_bogus_interrupt(h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6993) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6995) if (interrupt_not_for_us(h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6996) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) h->last_intr_timestamp = get_jiffies_64();
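^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997) /* Drain and discard everything on the reply queue; these are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997)  * stale completions left over from before the reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6997)  */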
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6998) while (interrupt_pending(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6999) raw_tag = get_next_completion(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7000) while (raw_tag != FIFO_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7001) raw_tag = next_command(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7002) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7003) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7006) static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7008) struct ctlr_info *h = queue_to_hba(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7009) u32 raw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7010) u8 q = *(u8 *) queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7012) if (ignore_bogus_interrupt(h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7013) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7015) h->last_intr_timestamp = get_jiffies_64();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7016) raw_tag = get_next_completion(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7017) while (raw_tag != FIFO_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7018) raw_tag = next_command(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7019) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7021)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7022) static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7024) struct ctlr_info *h = queue_to_hba((u8 *) queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7025) u32 raw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7026) u8 q = *(u8 *) queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7028) if (interrupt_not_for_us(h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7029) return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7030) h->last_intr_timestamp = get_jiffies_64();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7031) while (interrupt_pending(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7032) raw_tag = get_next_completion(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7033) while (raw_tag != FIFO_EMPTY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7034) process_indexed_cmd(h, raw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7035) raw_tag = next_command(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7038) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7039) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7040)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7041) static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7043) struct ctlr_info *h = queue_to_hba(queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7044) u32 raw_tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7045) u8 q = *(u8 *) queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7047) h->last_intr_timestamp = get_jiffies_64();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7048) raw_tag = get_next_completion(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7049) while (raw_tag != FIFO_EMPTY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7050) process_indexed_cmd(h, raw_tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7051) raw_tag = next_command(h, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7053) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7056) /* Send a message CDB to the firmware. Careful, this only works
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7057) * in simple mode, not performant mode due to the tag lookup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7058) * We only ever use this immediately after a controller reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7059) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7060) static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7061) unsigned char type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7063) struct Command {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7064) struct CommandListHeader CommandHeader;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7065) struct RequestBlock Request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7066) struct ErrDescriptor ErrorDescriptor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7067) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7068) struct Command *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) /* Reserve room for the command itself plus the full error info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069)  * buffer that ErrorDescriptor points at, which follows the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069)  * command (ErrorDescriptor.Len is sizeof(struct ErrorInfo)).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7069) static const size_t cmd_sz = sizeof(*cmd) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7070) sizeof(struct ErrorInfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7071) dma_addr_t paddr64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7072) __le32 paddr32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7073) u32 tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7074) void __iomem *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7075) int i, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7076)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7077) vaddr = pci_ioremap_bar(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7078) if (vaddr == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7079) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7081) /* The Inbound Post Queue only accepts 32-bit physical addresses for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7082) * CCISS commands, so they must be allocated from the lower 4GiB of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7083) * memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7084) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7085) err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7086) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7087) iounmap(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7088) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7089) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7091) cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7092) if (cmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7093) iounmap(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7094) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7097) /* This must fit, because of the 32-bit consistent DMA mask. Also,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7098) * although there's no guarantee, we assume that the address is at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7099) * least 4-byte aligned (most likely, it's page-aligned).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7100) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7101) paddr32 = cpu_to_le32(paddr64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7103) cmd->CommandHeader.ReplyQueue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7104) cmd->CommandHeader.SGList = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7105) cmd->CommandHeader.SGTotal = cpu_to_le16(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7106) cmd->CommandHeader.tag = cpu_to_le64(paddr64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7107) memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7108)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7109) cmd->Request.CDBLen = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7110) cmd->Request.type_attr_dir =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7111) TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7112) cmd->Request.Timeout = 0; /* Don't time out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7113) cmd->Request.CDB[0] = opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7114) cmd->Request.CDB[1] = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7115) memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7116) cmd->ErrorDescriptor.Addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7117) cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7118) cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7120) writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121)
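^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121) /* Poll the outbound reply FIFO until our tag (the command's DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121)  * address) comes back, masking off the low error/status bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121)  * before comparing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7121)  */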
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7122) for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7123) tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7124) if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7125) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7126) msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7128)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7129) iounmap(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7131) /* we leak the DMA buffer here ... no choice since the controller could
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7132) * still complete the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7133) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7134) if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7135) dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7136) opcode, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7137) return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7140) dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7142) if (tag & HPSA_ERROR_BIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7143) dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7144) opcode, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7145) return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7148) dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7149) opcode, type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7150) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152)
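^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7152) /* Message opcode 3, type 0 is a no-op (hence the macro name). */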
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7153) #define hpsa_noop(p) hpsa_message(p, 3, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7154)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7155) static int hpsa_controller_hard_reset(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7156) void __iomem *vaddr, u32 use_doorbell)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7159) if (use_doorbell) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7160) /* For everything after the P600, the PCI power state method
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7161) * of resetting the controller doesn't work, so we have this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7162) * other way using the doorbell register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7163) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7164) dev_info(&pdev->dev, "using doorbell to reset controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7165) writel(use_doorbell, vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7167) /* PMC hardware guys tell us we need a 10 second delay after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7168) * doorbell reset and before any attempt to talk to the board
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7169) * at all to ensure that this actually works and doesn't fall
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7170) * over in some weird corner cases.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7171) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7172) msleep(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7173) } else { /* Try to do it the PCI power state way */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7175) /* Quoting from the Open CISS Specification: "The Power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7176) * Management Control/Status Register (CSR) controls the power
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7177) * state of the device. The normal operating state is D0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7178) * CSR=00h. The software off state is D3, CSR=03h. To reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7179) * the controller, place the interface device in D3 then to D0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7180) * this causes a secondary PCI reset which will reset the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7181) * controller." */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7183) int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7185) dev_info(&pdev->dev, "using PCI PM to reset controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7187) /* enter the D3hot power management state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7188) rc = pci_set_power_state(pdev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7189) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7190) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7192) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7194) /* enter the D0 power management state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7195) rc = pci_set_power_state(pdev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7196) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7197) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7198)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7199) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7200) * The P600 requires a small delay when changing states.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7201) * Otherwise we may think the board did not reset and we bail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7202)  * This is for kdump only and is particular to the P600.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7204) msleep(500);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7206) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7209) static void init_driver_version(char *driver_version, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7210) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7211) memset(driver_version, 0, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7212) strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7215) static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7217) char *driver_version;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7218) int i, size = sizeof(cfgtable->driver_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7220) driver_version = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7221) if (!driver_version)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7222) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) init_driver_version(driver_version, size);
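^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224) /* The cfgtable lives in device (BAR) memory, so each byte must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224)  * be written through writeb() rather than a plain memcpy().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7224)  */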
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7225) for (i = 0; i < size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7226) writeb(driver_version[i], &cfgtable->driver_version[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7227) kfree(driver_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7228) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7229) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7230)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7231) static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7232) unsigned char *driver_ver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7234) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7236) for (i = 0; i < sizeof(cfgtable->driver_version); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7237) driver_ver[i] = readb(&cfgtable->driver_version[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7239)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7240) static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7243) char *driver_ver, *old_driver_ver;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7244) int rc, size = sizeof(cfgtable->driver_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7246) old_driver_ver = kmalloc_array(2, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7247) if (!old_driver_ver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7248) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7249) driver_ver = old_driver_ver + size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7251) /* After a reset, the 32 bytes of "driver version" in the cfgtable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7252) * should have been changed, otherwise we know the reset failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7253) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7254) init_driver_version(old_driver_ver, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7255) read_driver_ver_from_cfgtable(cfgtable, driver_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7256) rc = !memcmp(driver_ver, old_driver_ver, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7257) kfree(old_driver_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7258) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7259) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7260) /* This does a hard reset of the controller using PCI power management
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7261)  * states or the doorbell register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7262)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7263) static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7264) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7265) u64 cfg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7266) u32 cfg_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7267) u64 cfg_base_addr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7268) void __iomem *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7269) unsigned long paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7270) u32 misc_fw_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7271) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7272) struct CfgTable __iomem *cfgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7273) u32 use_doorbell;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7274) u16 command_register;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7276) /* For controllers as old as the P600, this is very nearly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7277) * the same thing as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7278) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7279) * pci_save_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7280) * pci_set_power_state(pci_dev, PCI_D3hot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7281) * pci_set_power_state(pci_dev, PCI_D0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7282) * pci_restore_state(pci_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7284) * For controllers newer than the P600, the pci power state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7285) * method of resetting doesn't work so we have another way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7286) * using the doorbell register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7287) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7289) if (!ctlr_is_resettable(board_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7290) dev_warn(&pdev->dev, "Controller not resettable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7291) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7294) /* if controller is soft- but not hard resettable... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7295) if (!ctlr_is_hard_resettable(board_id))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7296) return -ENOTSUPP; /* try soft reset later. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7298) /* Save the PCI command register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7299) pci_read_config_word(pdev, PCI_COMMAND, &command_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7300) pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7302) /* find the first memory BAR, so we can find the cfg table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7303) rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7304) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7305) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7306) vaddr = remap_pci_mem(paddr, 0x250);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7307) if (!vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7308) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7309)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7310) /* find cfgtable in order to check if reset via doorbell is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7311) rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7312) &cfg_base_addr_index, &cfg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7313) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7314) goto unmap_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7315) cfgtable = remap_pci_mem(pci_resource_start(pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7316) cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7317) if (!cfgtable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7318) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7319) goto unmap_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7321) rc = write_driver_ver_to_cfgtable(cfgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7322) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7323) goto unmap_cfgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7325) /* If reset via doorbell register is supported, use that.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7326) * There are two such methods. Favor the newest method.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7327) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7328) misc_fw_support = readl(&cfgtable->misc_fw_support);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7329) use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7330) if (use_doorbell) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7331) use_doorbell = DOORBELL_CTLR_RESET2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7332) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7333) use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7334) if (use_doorbell) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7335) dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7336) "Soft reset not supported. Firmware update is required.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7337) rc = -ENOTSUPP; /* try soft reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7338) goto unmap_cfgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7340) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7341)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7342) rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7343) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7344) goto unmap_cfgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7346) pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7347) pci_write_config_word(pdev, PCI_COMMAND, command_register);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7349) /* Some devices (notably the HP Smart Array 5i Controller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350)  * need a little pause here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7350)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7351) msleep(HPSA_POST_RESET_PAUSE_MSECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7353) rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7354) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7355) dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7356) "Failed waiting for board to become ready after hard reset\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7357) goto unmap_cfgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7360) rc = controller_reset_failed(cfgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7361) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7362) goto unmap_cfgtable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7363) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7364) dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7365) "Unable to successfully reset controller. Will try soft reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7366) rc = -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7367) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7368) dev_info(&pdev->dev, "board ready after hard reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7370)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7371) unmap_cfgtable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7372) iounmap(cfgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7374) unmap_vaddr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7375) iounmap(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7376) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7380) * We cannot read the structure directly, for portability we must use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7381) * the io functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7382) * This is for debug only.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7383) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7384) static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7386) #ifdef HPSA_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7387) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7388) char temp_name[17];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7390) dev_info(dev, "Controller Configuration information\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7391) dev_info(dev, "------------------------------------\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7392) for (i = 0; i < 4; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7393) temp_name[i] = readb(&(tb->Signature[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7394) temp_name[4] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7395) dev_info(dev, " Signature = %s\n", temp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7396) dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7397) dev_info(dev, " Transport methods supported = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7398) readl(&(tb->TransportSupport)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7399) dev_info(dev, " Transport methods active = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7400) readl(&(tb->TransportActive)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7401) dev_info(dev, " Requested transport Method = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7402) readl(&(tb->HostWrite.TransportRequest)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7403) dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7404) readl(&(tb->HostWrite.CoalIntDelay)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7405) dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7406) readl(&(tb->HostWrite.CoalIntCount)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7407) dev_info(dev, " Max outstanding commands = %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7408) readl(&(tb->CmdsOutMax)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7409) dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7410) for (i = 0; i < 16; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7411) temp_name[i] = readb(&(tb->ServerName[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7412) temp_name[16] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7413) dev_info(dev, " Server Name = %s\n", temp_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7414) dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7415) readl(&(tb->HeartBeat)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7416) #endif /* HPSA_DEBUG */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7417) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418)
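^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418) /* Given the config-space address of a BAR register (e.g. 0x10 for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418)  * BAR 0), return the matching PCI resource index by walking the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418)  * BARs and accumulating their config-space footprint: 4 bytes for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418)  * I/O and 32-bit memory BARs, 8 bytes for 64-bit memory BARs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7418)  */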
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7419) static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7420) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7421) int i, offset, mem_type, bar_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7422)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7423) if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7424) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7425) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7426) for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7427) bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7428) if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7429) offset += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7430) else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7431) mem_type = pci_resource_flags(pdev, i) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7432) PCI_BASE_ADDRESS_MEM_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7433) switch (mem_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7434) case PCI_BASE_ADDRESS_MEM_TYPE_32:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7435) case PCI_BASE_ADDRESS_MEM_TYPE_1M:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7436) offset += 4; /* 32 bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7437) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7438) case PCI_BASE_ADDRESS_MEM_TYPE_64:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7439) offset += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7440) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7441) default: /* reserved in PCI 2.2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7442) dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7443) "base address is invalid\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7444) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7446) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7447) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7448) if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7449) return i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7450) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7451) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7452) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7454) static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7456) pci_free_irq_vectors(h->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7457) h->msix_vectors = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7458) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459)
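^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459)  * Build the CPU -> reply queue map from the MSI-X affinity masks so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459)  * that a command's completion is steered to the reply queue whose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459)  * vector covers the submitting CPU. If any vector lacks an affinity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459)  * mask, fall back to queue 0 for all CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7459)  */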
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7460) static void hpsa_setup_reply_map(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7461) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7462) const struct cpumask *mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7463) unsigned int queue, cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7464)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7465) for (queue = 0; queue < h->msix_vectors; queue++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7466) mask = pci_irq_get_affinity(h->pdev, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7467) if (!mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7468) goto fallback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7470) for_each_cpu(cpu, mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7471) h->reply_map[cpu] = queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7473) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7475) fallback:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7476) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7477) h->reply_map[cpu] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7479)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7480) /* If MSI/MSI-X is supported by the kernel we will try to enable it on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7481) * controllers that are capable. If not, we use legacy INTx mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7482) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7483) static int hpsa_interrupt_mode(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7485) unsigned int flags = PCI_IRQ_LEGACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7486) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7488) /* Some boards advertise MSI but don't really support it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7489) switch (h->board_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7490) case 0x40700E11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7491) case 0x40800E11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7492) case 0x40820E11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7493) case 0x40830E11:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7494) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7495) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7496) ret = pci_alloc_irq_vectors(h->pdev, 1, MAX_REPLY_QUEUES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7497) PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7498) if (ret > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7499) h->msix_vectors = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7500) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7501) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502)
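^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502) /* MSI-X allocation failed; fall through and request a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502)  * MSI vector instead, or legacy INTx if MSI is unavailable too.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7502)  */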
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7503) flags |= PCI_IRQ_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7506)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7507) ret = pci_alloc_irq_vectors(h->pdev, 1, 1, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7508) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7509) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7510) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7511) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7513) static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7514) bool *legacy_board)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7516) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7517) u32 subsystem_vendor_id, subsystem_device_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7519) subsystem_vendor_id = pdev->subsystem_vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7520) subsystem_device_id = pdev->subsystem_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7521) *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522) subsystem_vendor_id;
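^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522) /* For example, a subsystem device id of 0x3241 with subsystem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522)  * vendor id 0x103C (HP) yields a board_id of 0x3241103C.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7522)  */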
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7523)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7524) if (legacy_board)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7525) *legacy_board = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7526) for (i = 0; i < ARRAY_SIZE(products); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7527) if (*board_id == products[i].board_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7528) if (products[i].access != &SA5A_access &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7529) products[i].access != &SA5B_access)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7530) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7531) dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7532) "legacy board ID: 0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7533) *board_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7534) if (legacy_board)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7535) *legacy_board = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7536) return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7538)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7539) dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x\n", *board_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7540) if (legacy_board)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7541) *legacy_board = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7542) return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7545) static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7546) unsigned long *memory_bar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7547) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7548) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7550) for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7551) if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7552) /* addressing mode bits already removed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7553) *memory_bar = pci_resource_start(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7554) dev_dbg(&pdev->dev, "memory BAR = %lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7555) *memory_bar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7556) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7558) dev_warn(&pdev->dev, "no memory BAR found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7559) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7561)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7562) static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7563) int wait_for_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7564) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7565) int i, iterations;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) u32 scratchpad;
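^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566) /* Firmware posts HPSA_FIRMWARE_READY in the scratchpad register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566)  * once the board is up; poll until that value appears or goes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566)  * away, depending on which state the caller is waiting for.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7566)  */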
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7567) if (wait_for_ready)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7568) iterations = HPSA_BOARD_READY_ITERATIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7569) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7570) iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7572) for (i = 0; i < iterations; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7573) scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7574) if (wait_for_ready) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7575) if (scratchpad == HPSA_FIRMWARE_READY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7576) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7577) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7578) if (scratchpad != HPSA_FIRMWARE_READY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7579) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7581) msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7582) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7583) dev_warn(&pdev->dev, "board not ready, timed out.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7584) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7585) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586)
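^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586)  * The controller publishes its config table location in two BAR0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586)  * registers: SA5_CTCFG_OFFSET holds the config-space address of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586)  * BAR containing the table (only the low 16 bits are meaningful) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586)  * SA5_CTMEM_OFFSET holds the table's byte offset within that BAR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7586)  */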
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7587) static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7588) u32 *cfg_base_addr, u64 *cfg_base_addr_index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7589) u64 *cfg_offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7591) *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7592) *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7593) *cfg_base_addr &= (u32) 0x0000ffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7594) *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7595) if (*cfg_base_addr_index == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7596) dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7597) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7599) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7600) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7601)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7602) static void hpsa_free_cfgtables(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7603) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7604) if (h->transtable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7605) iounmap(h->transtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7606) h->transtable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7607) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7608) if (h->cfgtable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7609) iounmap(h->cfgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7610) h->cfgtable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7611) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7612) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7613)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7614) /* Find and map CISS config table and transfer table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7615)  * Several items must be unmapped (freed) later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7616)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7617) static int hpsa_find_cfgtables(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7619) u64 cfg_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7620) u32 cfg_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7621) u64 cfg_base_addr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7622) u32 trans_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7623) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7625) rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7626) &cfg_base_addr_index, &cfg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7627) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7628) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7629) h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7630) cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7631) if (!h->cfgtable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7632) dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7633) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7635) rc = write_driver_ver_to_cfgtable(h->cfgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) /* unmap what we just mapped so the caller doesn't have to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7636) hpsa_free_cfgtables(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7637) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7638) /* Find performant mode table. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7639) trans_offset = readl(&h->cfgtable->TransMethodOffset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7640) h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7641) cfg_base_addr_index)+cfg_offset+trans_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7642) sizeof(*h->transtable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7643) if (!h->transtable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7644) dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7645) hpsa_free_cfgtables(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7646) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7647) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7648) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7650)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7651) static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7653) #define MIN_MAX_COMMANDS 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7654) BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7656) h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7658) /* Limit commands in memory limited kdump scenario. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7659) if (reset_devices && h->max_commands > 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7660) h->max_commands = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7662) if (h->max_commands < MIN_MAX_COMMANDS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7663) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7664) "Controller reports max supported commands of %d. Using %d instead. Ensure that firmware is up to date.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7665) h->max_commands,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7666) MIN_MAX_COMMANDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7667) h->max_commands = MIN_MAX_COMMANDS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7671) /* If the controller reports that the total max sg entries is greater than 512,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7672) * then we know that chained SG blocks work. (Original smart arrays did not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7673) * support chained SG blocks and would return zero for max sg entries.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7674) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7675) static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7676) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7677) return h->maxsgentries > 512;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7680) /* Interrogate the hardware for some limits:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7681) * max commands, max SG elements without chaining, and with chaining,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7682) * SG chain block size, etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7683) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7684) static void hpsa_find_board_params(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7685) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7686) hpsa_get_max_perf_mode_cmds(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7687) h->nr_cmds = h->max_commands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7688) h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7689) h->fw_support = readl(&(h->cfgtable->misc_fw_support));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7690) if (hpsa_supports_chained_sg_blocks(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7691) /* Limit in-command s/g elements to 32 to save DMA'able memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7692) h->max_cmd_sg_entries = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7693) h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7694) h->maxsgentries--; /* save one for chain pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7695) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7696) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7697) * Original smart arrays supported at most 31 s/g entries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7698) * embedded inline in the command (trying to use more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7699) * would lock up the controller)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7701) h->max_cmd_sg_entries = 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7702) h->maxsgentries = 31; /* default to traditional values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7703) h->chainsize = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7706) /* Find out what task management functions are supported and cache */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7707) h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7708) if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7709) dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7710) if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7711) dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7712) if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7713) dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7716) static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7718) if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7719) dev_err(&h->pdev->dev, "not a valid CISS config table\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7720) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7722) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7725) static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7727) u32 driver_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7729) driver_support = readl(&(h->cfgtable->driver_support));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7730) /* Need to enable prefetch in the SCSI core for 6400 on x86 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7731) #ifdef CONFIG_X86
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7732) driver_support |= ENABLE_SCSI_PREFETCH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7733) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7734) driver_support |= ENABLE_UNIT_ATTN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7735) writel(driver_support, &(h->cfgtable->driver_support));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7738) /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7739) * in a prefetch beyond physical memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7740) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7741) static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7742) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7743) u32 dma_prefetch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744)
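^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7744) /* 0x3225103C is the P600's board id */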
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7745) if (h->board_id != 0x3225103C)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7746) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7747) dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7748) dma_prefetch |= 0x8000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7749) writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7752) static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7754) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7755) u32 doorbell_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7756) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7757) /* wait until the clear_event_notify bit 6 is cleared by controller. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7758) for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7759) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7760) doorbell_value = readl(h->vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7761) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7762) if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7763) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7764) /* delay and try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7765) msleep(CLEAR_EVENT_WAIT_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7767) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7768) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7769) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7772) static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7774) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7775) u32 doorbell_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7776) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7777)
	/* Under certain very rare conditions, this can take a while
	 * (e.g.: hot replacing a failed 144GB drive in a RAID 5 set right
	 * as we enter this code).
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7782) for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
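		/* If controller removal has begun, stop polling and return
		 * success so teardown is not held up here.
		 */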
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7783) if (h->remove_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7784) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7785) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7786) doorbell_value = readl(h->vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7787) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7788) if (!(doorbell_value & CFGTBL_ChangeReq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7789) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7790) /* delay and try again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7791) msleep(MODE_CHANGE_WAIT_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7792) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7793) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7794) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7795) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7798) /* return -ENODEV or other reason on error, 0 on success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7799) static int hpsa_enter_simple_mode(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7800) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7801) u32 trans_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7803) trans_support = readl(&(h->cfgtable->TransportSupport));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7804) if (!(trans_support & SIMPLE_MODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7805) return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7807) h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7809) /* Update the field, and then ring the doorbell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7810) writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7811) writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7812) writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7813) if (hpsa_wait_for_mode_change_ack(h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7814) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7815) print_cfg_table(&h->pdev->dev, h->cfgtable);
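	/* Verify the controller actually made the switch: TransportActive
	 * must report simple mode before we rely on it.
	 */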
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7816) if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7817) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7818) h->transMethod = CFGTBL_Trans_Simple;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7819) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7820) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7821) dev_err(&h->pdev->dev, "failed to enter simple mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7822) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7825) /* free items allocated or mapped by hpsa_pci_init */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7826) static void hpsa_free_pci_init(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7828) hpsa_free_cfgtables(h); /* pci_init 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7829) iounmap(h->vaddr); /* pci_init 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7830) h->vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7831) hpsa_disable_interrupt_mode(h); /* pci_init 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7832) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7833) * call pci_disable_device before pci_release_regions per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7834) * Documentation/driver-api/pci/pci.rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7836) pci_disable_device(h->pdev); /* pci_init 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7837) pci_release_regions(h->pdev); /* pci_init 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7840) /* several items must be freed later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7841) static int hpsa_pci_init(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7843) int prod_index, err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7844) bool legacy_board;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7846) prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id, &legacy_board);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7847) if (prod_index < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7848) return prod_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7849) h->product_name = products[prod_index].product_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7850) h->access = *(products[prod_index].access);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7851) h->legacy_board = legacy_board;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7852) pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7853) PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7855) err = pci_enable_device(h->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7856) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7857) dev_err(&h->pdev->dev, "failed to enable PCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7858) pci_disable_device(h->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7859) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7862) err = pci_request_regions(h->pdev, HPSA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7863) if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7864) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7865) "failed to obtain PCI resources\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7866) pci_disable_device(h->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7867) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7870) pci_set_master(h->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7871)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7872) err = hpsa_interrupt_mode(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7873) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7874) goto clean1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7876) /* setup mapping between CPU and reply queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7877) hpsa_setup_reply_map(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7879) err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7880) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7881) goto clean2; /* intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7882) h->vaddr = remap_pci_mem(h->paddr, 0x250);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7883) if (!h->vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7884) dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7885) err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7886) goto clean2; /* intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7887) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7888) err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7889) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7890) goto clean3; /* vaddr, intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7891) err = hpsa_find_cfgtables(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7892) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7893) goto clean3; /* vaddr, intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7894) hpsa_find_board_params(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7896) if (!hpsa_CISS_signature_present(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7897) err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7898) goto clean4; /* cfgtables, vaddr, intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7900) hpsa_set_driver_support_bits(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7901) hpsa_p600_dma_prefetch_quirk(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7902) err = hpsa_enter_simple_mode(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7903) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7904) goto clean4; /* cfgtables, vaddr, intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7905) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7906)
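/* Error exits below unwind in reverse order of the setup above; each
 * label's comment lists what is still held at that point.
 */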
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7907) clean4: /* cfgtables, vaddr, intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7908) hpsa_free_cfgtables(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7909) clean3: /* vaddr, intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7910) iounmap(h->vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7911) h->vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7912) clean2: /* intmode+region, pci */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7913) hpsa_disable_interrupt_mode(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7914) clean1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7915) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7916) * call pci_disable_device before pci_release_regions per
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7917) * Documentation/driver-api/pci/pci.rst
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7918) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7919) pci_disable_device(h->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7920) pci_release_regions(h->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7921) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7923)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7924) static void hpsa_hba_inquiry(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7925) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7926) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7928) #define HBA_INQUIRY_BYTE_COUNT 64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7929) h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7930) if (!h->hba_inquiry_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7931) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7932) rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7933) h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7934) if (rc != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7935) kfree(h->hba_inquiry_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7936) h->hba_inquiry_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7937) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7938) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7940) static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7941) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7942) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7943) void __iomem *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7945) if (!reset_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7947)
	/* The kdump kernel is loading; we don't know what state the
	 * PCI interface is in. dev->enable_cnt is zero, so we
	 * enable+disable the device, wait a while, and switch it back on.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7952) rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7953) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7954) dev_warn(&pdev->dev, "Failed to enable PCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7955) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7956) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7957) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7958) msleep(260); /* a randomly chosen number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7959) rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7960) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7961) dev_warn(&pdev->dev, "failed to enable device.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7962) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7965) pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7967) vaddr = pci_ioremap_bar(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7968) if (vaddr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7969) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7970) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7971) }
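	/* Mask controller interrupts across the reset; the kdump kernel
	 * has not registered handlers for them yet.
	 */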
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7972) writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7973) iounmap(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7975) /* Reset the controller with a PCI power-cycle or via doorbell */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7976) rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7977)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7978) /* -ENOTSUPP here means we cannot reset the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7979) * but it's already (and still) up and running in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7980) * "performant mode". Or, it might be 640x, which can't reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7981) * due to concerns about shared bbwc between 6402/6404 pair.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7982) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7983) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7984) goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7985)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7986) /* Now try to get the controller to respond to a no-op */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7987) dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7988) for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7989) if (hpsa_noop(pdev) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7990) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7991) else
			dev_warn(&pdev->dev, "no-op failed%s\n",
				(i < HPSA_POST_RESET_NOOP_RETRIES - 1 ?
					"; re-trying" : ""));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7995)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7996) out_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7998) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7999) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8002) static void hpsa_free_cmd_pool(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8004) kfree(h->cmd_pool_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8005) h->cmd_pool_bits = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8006) if (h->cmd_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8007) dma_free_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8008) h->nr_cmds * sizeof(struct CommandList),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8009) h->cmd_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8010) h->cmd_pool_dhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8011) h->cmd_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8012) h->cmd_pool_dhandle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8014) if (h->errinfo_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8015) dma_free_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8016) h->nr_cmds * sizeof(struct ErrorInfo),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8017) h->errinfo_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8018) h->errinfo_pool_dhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8019) h->errinfo_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8020) h->errinfo_pool_dhandle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8024) static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8025) {
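	/* cmd_pool_bits is a bitmap with one bit per command slot,
	 * tracking which entries of the coherent cmd_pool are in use.
	 */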
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8026) h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8027) sizeof(unsigned long),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8028) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8029) h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8030) h->nr_cmds * sizeof(*h->cmd_pool),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8031) &h->cmd_pool_dhandle, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8032) h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8033) h->nr_cmds * sizeof(*h->errinfo_pool),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8034) &h->errinfo_pool_dhandle, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8035) if ((h->cmd_pool_bits == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8036) || (h->cmd_pool == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8037) || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8039) goto clean_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8041) hpsa_preinitialize_commands(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8042) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8043) clean_up:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8044) hpsa_free_cmd_pool(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8045) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8047)
/* free MSI-X, MSI, or legacy INTx vectors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8049) static void hpsa_free_irqs(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8050) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8051) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8052) int irq_vector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8054) if (hpsa_simple_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8055) irq_vector = h->intr_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8057) if (!h->msix_vectors || h->intr_mode != PERF_MODE_INT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8058) /* Single reply queue, only one irq to free */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8059) free_irq(pci_irq_vector(h->pdev, irq_vector),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8060) &h->q[h->intr_mode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8061) h->q[h->intr_mode] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8062) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8064)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8065) for (i = 0; i < h->msix_vectors; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8066) free_irq(pci_irq_vector(h->pdev, i), &h->q[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8067) h->q[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8069) for (; i < MAX_REPLY_QUEUES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8070) h->q[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8072)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8073) /* returns 0 on success; cleans up and returns -Enn on error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8074) static int hpsa_request_irqs(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8075) irqreturn_t (*msixhandler)(int, void *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8076) irqreturn_t (*intxhandler)(int, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8078) int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8079) int irq_vector = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8081) if (hpsa_simple_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8082) irq_vector = h->intr_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8083)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8085) * initialize h->q[x] = x so that interrupt handlers know which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8086) * queue to process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8087) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8088) for (i = 0; i < MAX_REPLY_QUEUES; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8089) h->q[i] = (u8) i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8090)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8091) if (h->intr_mode == PERF_MODE_INT && h->msix_vectors > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8092) /* If performant mode and MSI-X, use multiple reply queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8093) for (i = 0; i < h->msix_vectors; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8094) sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8095) rc = request_irq(pci_irq_vector(h->pdev, i), msixhandler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8096) 0, h->intrname[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8097) &h->q[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8098) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8099) int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8101) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8102) "failed to get irq %d for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8103) pci_irq_vector(h->pdev, i), h->devname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8104) for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8105) free_irq(pci_irq_vector(h->pdev, j), &h->q[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8106) h->q[j] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8108) for (; j < MAX_REPLY_QUEUES; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8109) h->q[j] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8110) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8111) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8113) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8114) /* Use single reply pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8115) if (h->msix_vectors > 0 || h->pdev->msi_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8116) sprintf(h->intrname[0], "%s-msi%s", h->devname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8117) h->msix_vectors ? "x" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8118) rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8119) msixhandler, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8120) h->intrname[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8121) &h->q[h->intr_mode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8122) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8123) sprintf(h->intrname[h->intr_mode],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8124) "%s-intx", h->devname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8125) rc = request_irq(pci_irq_vector(h->pdev, irq_vector),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8126) intxhandler, IRQF_SHARED,
				h->intrname[h->intr_mode],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8128) &h->q[h->intr_mode]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8129) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8131) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8132) dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8133) pci_irq_vector(h->pdev, irq_vector), h->devname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8134) hpsa_free_irqs(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8135) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8137) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8138) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8140) static int hpsa_kdump_soft_reset(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8141) {
	int rc;

	hpsa_send_host_reset(h, HPSA_RESET_TYPE_CONTROLLER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8144)
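	/* A successful soft reset shows up as the board dropping to
	 * NOT_READY and then returning to READY; wait for both transitions.
	 */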
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8145) dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8146) rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8147) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8148) dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8149) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8152) dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8153) rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8154) if (rc) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8157) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8158) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8159)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8160) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8163) static void hpsa_free_reply_queues(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8165) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8167) for (i = 0; i < h->nreply_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8168) if (!h->reply_queue[i].head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8169) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8170) dma_free_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8171) h->reply_queue_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8172) h->reply_queue[i].head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8173) h->reply_queue[i].busaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8174) h->reply_queue[i].head = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8175) h->reply_queue[i].busaddr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8177) h->reply_queue_size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8179)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8180) static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8182) hpsa_free_performant_mode(h); /* init_one 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8183) hpsa_free_sg_chain_blocks(h); /* init_one 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8184) hpsa_free_cmd_pool(h); /* init_one 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8185) hpsa_free_irqs(h); /* init_one 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8186) scsi_host_put(h->scsi_host); /* init_one 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8187) h->scsi_host = NULL; /* init_one 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8188) hpsa_free_pci_init(h); /* init_one 2_5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8189) free_percpu(h->lockup_detected); /* init_one 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8190) h->lockup_detected = NULL; /* init_one 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8191) if (h->resubmit_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8192) destroy_workqueue(h->resubmit_wq); /* init_one 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8193) h->resubmit_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8194) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8195) if (h->rescan_ctlr_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8196) destroy_workqueue(h->rescan_ctlr_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8197) h->rescan_ctlr_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8198) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8199) if (h->monitor_ctlr_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8200) destroy_workqueue(h->monitor_ctlr_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8201) h->monitor_ctlr_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8202) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8204) kfree(h); /* init_one 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8207) /* Called when controller lockup detected. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8208) static void fail_all_outstanding_cmds(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8210) int i, refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8211) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8212) int failcount = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8214) flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8215) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8216) c = h->cmd_pool + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8217) refcount = atomic_inc_return(&c->refcount);
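		/* A refcount of 1 means the slot was idle in the pool;
		 * anything higher means the command is outstanding and
		 * must be completed with CMD_CTLR_LOCKUP status.
		 */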
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8218) if (refcount > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8219) c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8220) finish_cmd(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8221) atomic_dec(&h->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8222) failcount++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8223) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8224) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8225) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8226) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8227) "failed %d commands in fail_all\n", failcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8228) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8230) static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8232) int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8233)
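	/* lockup_detected is per-cpu so hot-path readers can test it
	 * without contending on a shared cacheline or lock.
	 */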
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8234) for_each_online_cpu(cpu) {
		u32 *lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8237) *lockup_detected = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8239) wmb(); /* be sure the per-cpu variables are out to memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8242) static void controller_lockup_detected(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8243) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8244) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8245) u32 lockup_detected;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8247) h->access.set_intr_mask(h, HPSA_INTR_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8248) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8249) lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8250) if (!lockup_detected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8251) /* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected after %d seconds but scratchpad register is zero\n",
			h->heartbeat_sample_interval / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8255) lockup_detected = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8256) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8257) set_lockup_detected_for_all_cpus(h, lockup_detected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8258) spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d seconds\n",
			lockup_detected, h->heartbeat_sample_interval / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8261) if (lockup_detected == 0xffff0000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8262) dev_warn(&h->pdev->dev, "Telling controller to do a CHKPT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8263) writel(DOORBELL_GENERATE_CHKPT, h->vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8264) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8265) pci_disable_device(h->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8266) fail_all_outstanding_cmds(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8268)
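/* Returns true if a controller lockup was detected and handled,
 * false if the controller still appears alive.
 */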
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8269) static int detect_controller_lockup(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8270) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8271) u64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8272) u32 heartbeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8273) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8275) now = get_jiffies_64();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8276) /* If we've received an interrupt recently, we're ok. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8277) if (time_after64(h->last_intr_timestamp +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8278) (h->heartbeat_sample_interval), now))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8279) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8280)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8281) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8282) * If we've already checked the heartbeat recently, we're ok.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8283) * This could happen if someone sends us a signal. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8284) * otherwise don't care about signals in this thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8286) if (time_after64(h->last_heartbeat_timestamp +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8287) (h->heartbeat_sample_interval), now))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8288) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8289)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8290) /* If heartbeat has not changed since we last looked, we're not ok. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8291) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8292) heartbeat = readl(&h->cfgtable->HeartBeat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8293) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8294) if (h->last_heartbeat == heartbeat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8295) controller_lockup_detected(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8296) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8299) /* We're ok. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8300) h->last_heartbeat = heartbeat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8301) h->last_heartbeat_timestamp = now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8302) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8305) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8306) * Set ioaccel status for all ioaccel volumes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8307) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8308) * Called from monitor controller worker (hpsa_event_monitor_worker)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8309) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8310) * A Volume (or Volumes that comprise an Array set) may be undergoing a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8311) * transformation, so we will be turning off ioaccel for all volumes that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8312) * make up the Array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8314) static void hpsa_set_ioaccel_status(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8316) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8317) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8318) u8 ioaccel_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8319) unsigned char *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8320) struct hpsa_scsi_dev_t *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8322) if (!h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8323) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8325) buf = kmalloc(64, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8326) if (!buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8327) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8328)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8329) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8330) * Run through current device list used during I/O requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8331) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8332) for (i = 0; i < h->ndevices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8333) int offload_to_be_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8334) int offload_config = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8335)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8336) device = h->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8337)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8338) if (!device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8339) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8340) if (!hpsa_vpd_page_supported(h, device->scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8341) HPSA_VPD_LV_IOACCEL_STATUS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8342) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8343)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8344) memset(buf, 0, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8346) rc = hpsa_scsi_do_inquiry(h, device->scsi3addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8347) VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8348) buf, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8349) if (rc != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8350) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8351)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8352) ioaccel_status = buf[IOACCEL_STATUS_BYTE];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8353)
		/*
		 * Check whether offload is still configured.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8357) offload_config =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8358) !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8359) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8360) * If offload is configured on, check to see if ioaccel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8361) * needs to be enabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8362) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8363) if (offload_config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8364) offload_to_be_enabled =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8365) !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8367) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8368) * If ioaccel is to be re-enabled, re-enable later during the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8369) * scan operation so the driver can get a fresh raidmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8370) * before turning ioaccel back on.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8371) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8372) if (offload_to_be_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8373) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8374)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8375) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8376) * Immediately turn off ioaccel for any volume the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8377) * controller tells us to. Some of the reasons could be:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8378) * transformation - change to the LVs of an Array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8379) * degraded volume - component failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8380) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8381) hpsa_turn_off_ioaccel_for_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8384) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8387) static void hpsa_ack_ctlr_events(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8388) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8389) char *event_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8390)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8391) if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8392) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8394) /* Ask the controller to clear the events we're handling. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8395) if ((h->transMethod & (CFGTBL_Trans_io_accel1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8396) | CFGTBL_Trans_io_accel2)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8397) (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8398) h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8399)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8400) if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8401) event_type = "state change";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8402) if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8403) event_type = "configuration change";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8404) /* Stop sending new RAID offload reqs via the IO accelerator */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8405) scsi_block_requests(h->scsi_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8406) hpsa_set_ioaccel_status(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8407) hpsa_drain_accel_commands(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8408) /* Set 'accelerator path config change' bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8409) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8410) "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8411) h->events, event_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8412) writel(h->events, &(h->cfgtable->clear_event_notify));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8413) /* Set the "clear event notify field update" bit 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8414) writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8415) /* Wait until ctlr clears 'clear event notify field', bit 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8416) hpsa_wait_for_clear_event_notify_ack(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8417) scsi_unblock_requests(h->scsi_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8418) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8419) /* Acknowledge controller notification events. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8420) writel(h->events, &(h->cfgtable->clear_event_notify));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8421) writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8422) hpsa_wait_for_clear_event_notify_ack(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8426)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8427) /* Check a register on the controller to see if there are configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8428) * changes (added/changed/removed logical drives, etc.) which mean that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8429) * we should rescan the controller for devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8430) * Also check flag for driver-initiated rescan.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8431) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8432) static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8433) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8434) if (h->drv_req_rescan) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8435) h->drv_req_rescan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8436) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8439) if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8440) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8441)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8442) h->events = readl(&(h->cfgtable->event_notify));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8443) return h->events & RESCAN_REQUIRED_EVENT_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8444) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8446) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8447) * Check if any of the offline devices have become ready
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8448) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8449) static int hpsa_offline_devices_ready(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8451) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8452) struct offline_device_entry *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8453) struct list_head *this, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8455) spin_lock_irqsave(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8456) list_for_each_safe(this, tmp, &h->offline_device_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8457) d = list_entry(this, struct offline_device_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8458) offline_list);
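		/* Drop the lock while we probe the volume;
		 * hpsa_volume_offline() issues commands and may sleep.
		 */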
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8459) spin_unlock_irqrestore(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8460) if (!hpsa_volume_offline(h, d->scsi3addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8461) spin_lock_irqsave(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8462) list_del(&d->offline_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8463) spin_unlock_irqrestore(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8464) return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8466) spin_lock_irqsave(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8468) spin_unlock_irqrestore(&h->offline_device_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8469) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8471)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8472) static int hpsa_luns_changed(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8474) int rc = 1; /* assume there are changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8475) struct ReportLUNdata *logdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8477) /* if we can't find out if lun data has changed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8478) * assume that it has.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8479) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8480)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8481) if (!h->lastlogicals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8482) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8484) logdev = kzalloc(sizeof(*logdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8485) if (!logdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8486) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8488) if (hpsa_scsi_do_report_luns(h, 1, logdev, sizeof(*logdev), 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8489) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8490) "report luns failed, can't track lun changes.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8491) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8493) if (memcmp(logdev, h->lastlogicals, sizeof(*logdev))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8494) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8495) "Lun changes detected.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8496) memcpy(h->lastlogicals, logdev, sizeof(*logdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8497) goto out;
	} else {
		rc = 0; /* no changes detected. */
	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8500) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8501) kfree(logdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8502) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8504)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8505) static void hpsa_perform_rescan(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8507) struct Scsi_Host *sh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8508) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8510) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8511) * Do the scan after the reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8512) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8513) spin_lock_irqsave(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8514) if (h->reset_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8515) h->drv_req_rescan = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8516) spin_unlock_irqrestore(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8517) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8519) spin_unlock_irqrestore(&h->reset_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8521) sh = scsi_host_get(h->scsi_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8522) if (sh != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8523) hpsa_scan_start(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8524) scsi_host_put(sh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8525) h->drv_req_rescan = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8527) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8529) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8530) * watch for controller events
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8531) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8532) static void hpsa_event_monitor_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8534) struct ctlr_info *h = container_of(to_delayed_work(work),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8535) struct ctlr_info, event_monitor_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8536) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8537)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8538) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8539) if (h->remove_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8540) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8541) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8542) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8543) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8544)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8545) if (hpsa_ctlr_needs_rescan(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8546) hpsa_ack_ctlr_events(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8547) hpsa_perform_rescan(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8549)
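	/* Re-arm the delayed work unless controller removal has begun;
	 * taking h->lock serializes this check against removal.
	 */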
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8550) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8551) if (!h->remove_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8552) queue_delayed_work(h->monitor_ctlr_wq, &h->event_monitor_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8553) HPSA_EVENT_MONITOR_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8554) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8555) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8557) static void hpsa_rescan_ctlr_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8559) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8560) struct ctlr_info *h = container_of(to_delayed_work(work),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8561) struct ctlr_info, rescan_ctlr_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8562)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8563) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8564) if (h->remove_in_progress) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8565) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8566) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8568) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8570) if (h->drv_req_rescan || hpsa_offline_devices_ready(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8571) hpsa_perform_rescan(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8572) } else if (h->discovery_polling) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8573) if (hpsa_luns_changed(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8574) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8575) "driver discovery polling rescan.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8576) hpsa_perform_rescan(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8577) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8578) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8579) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8580) if (!h->remove_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8581) queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8582) h->heartbeat_sample_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8583) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8586) static void hpsa_monitor_ctlr_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8588) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8589) struct ctlr_info *h = container_of(to_delayed_work(work),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8590) struct ctlr_info, monitor_ctlr_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8592) detect_controller_lockup(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8593) if (lockup_detected(h))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8594) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8596) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8597) if (!h->remove_in_progress)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8598) queue_delayed_work(h->monitor_ctlr_wq, &h->monitor_ctlr_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8599) h->heartbeat_sample_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8600) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8603) static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8604) char *name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8605) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8606) struct workqueue_struct *wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8608) wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8609) if (!wq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8610) dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8612) return wq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8614)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8615) static void hpda_free_ctlr_info(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8617) kfree(h->reply_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8618) kfree(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8621) static struct ctlr_info *hpda_alloc_ctlr_info(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8623) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8625) h = kzalloc(sizeof(*h), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8626) if (!h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8627) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8629) h->reply_map = kcalloc(nr_cpu_ids, sizeof(*h->reply_map), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8630) if (!h->reply_map) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8631) kfree(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8632) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8634) return h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8636)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8637) static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8638) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8639) int dac, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8640) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8641) int try_soft_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8642) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8643) u32 board_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8644)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8645) if (number_of_controllers == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8646) printk(KERN_INFO DRIVER_NAME "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8648) rc = hpsa_lookup_board_id(pdev, &board_id, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8649) if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8650) dev_warn(&pdev->dev, "Board ID not found\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8651) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8652) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8653)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8654) rc = hpsa_init_reset_devices(pdev, board_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8655) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8656) if (rc != -ENOTSUPP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8657) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8658) /* If the reset fails in a particular way (it has no way to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8659) * a proper hard reset, so returns -ENOTSUPP) we can try to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8660) * a soft reset once we get the controller configured up to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8661) * point that it can accept a command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8663) try_soft_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8664) rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8666)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8667) reinit_after_soft_reset:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8669) /* Command structures must be aligned on a 32-byte boundary because
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8670) * the 5 lower bits of the address are used by the hardware. and by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8671) * the driver. See comments in hpsa.h for more info.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8672) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8673) BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8674) h = hpda_alloc_ctlr_info();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8675) if (!h) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8676) dev_err(&pdev->dev, "Failed to allocate controller head\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8677) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8680) h->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8682) h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8683) INIT_LIST_HEAD(&h->offline_device_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8684) spin_lock_init(&h->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8685) spin_lock_init(&h->offline_device_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8686) spin_lock_init(&h->scan_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8687) spin_lock_init(&h->reset_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8688) atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8690) /* Allocate and clear per-cpu variable lockup_detected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8691) h->lockup_detected = alloc_percpu(u32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8692) if (!h->lockup_detected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8693) dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8694) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8695) goto clean1; /* aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8696) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8697) set_lockup_detected_for_all_cpus(h, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8699) rc = hpsa_pci_init(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8700) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8701) goto clean2; /* lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8702)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8703) /* relies on h-> settings made by hpsa_pci_init, including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8704) * interrupt_mode h->intr */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8705) rc = hpsa_scsi_host_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8706) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8707) goto clean2_5; /* pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8709) sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8710) h->ctlr = number_of_controllers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8711) number_of_controllers++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8713) /* configure PCI DMA stuff */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8714) rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8715) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8716) dac = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8717) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8718) rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8719) if (rc == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8720) dac = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8721) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8722) dev_err(&pdev->dev, "no suitable DMA available\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8723) goto clean3; /* shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8726)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8727) /* make sure the board interrupts are off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8728) h->access.set_intr_mask(h, HPSA_INTR_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8729)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8730) rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8731) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8732) goto clean3; /* shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8733) rc = hpsa_alloc_cmd_pool(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8734) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8735) goto clean4; /* irq, shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8736) rc = hpsa_alloc_sg_chain_blocks(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8737) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8738) goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8739) init_waitqueue_head(&h->scan_wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8740) init_waitqueue_head(&h->event_sync_wait_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8741) mutex_init(&h->reset_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8742) h->scan_finished = 1; /* no scan currently in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8743) h->scan_waiting = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8745) pci_set_drvdata(pdev, h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8746) h->ndevices = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8748) spin_lock_init(&h->devlock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8749) rc = hpsa_put_ctlr_into_performant_mode(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8750) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8751) goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8752)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8753) /* create the resubmit workqueue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8754) h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8755) if (!h->rescan_ctlr_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8756) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8757) goto clean7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8758) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8759)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8760) h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8761) if (!h->resubmit_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8762) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8763) goto clean7; /* aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8766) h->monitor_ctlr_wq = hpsa_create_controller_wq(h, "monitor");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8767) if (!h->monitor_ctlr_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8768) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8769) goto clean7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8772) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8773) * At this point, the controller is ready to take commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8774) * Now, if reset_devices and the hard reset didn't work, try
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8775) * the soft reset and see if that works.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8776) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8777) if (try_soft_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8779) /* This is kind of gross. We may or may not get a completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8780) * from the soft reset command, and if we do, then the value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8781) * from the fifo may or may not be valid. So, we wait 10 secs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8782) * after the reset throwing away any completions we get during
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8783) * that time. Unregister the interrupt handler and register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8784) * fake ones to scoop up any residual completions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8785) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8786) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8787) h->access.set_intr_mask(h, HPSA_INTR_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8788) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8789) hpsa_free_irqs(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8790) rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8791) hpsa_intx_discard_completions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8792) if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8793) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8794) "Failed to request_irq after soft reset.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8795) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8796) * cannot goto clean7 or free_irqs will be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8797) * again. Instead, do its work
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8798) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8799) hpsa_free_performant_mode(h); /* clean7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8800) hpsa_free_sg_chain_blocks(h); /* clean6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8801) hpsa_free_cmd_pool(h); /* clean5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8802) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8803) * skip hpsa_free_irqs(h) clean4 since that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8804) * was just called before request_irqs failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8805) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8806) goto clean3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8809) rc = hpsa_kdump_soft_reset(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8810) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8811) /* Neither hard nor soft reset worked, we're hosed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8812) goto clean7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8814) dev_info(&h->pdev->dev, "Board READY.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8815) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8816) "Waiting for stale completions to drain.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8817) h->access.set_intr_mask(h, HPSA_INTR_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8818) msleep(10000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8819) h->access.set_intr_mask(h, HPSA_INTR_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8821) rc = controller_reset_failed(h->cfgtable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8822) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8823) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8824) "Soft reset appears to have failed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8826) /* since the controller's reset, we have to go back and re-init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8827) * everything. Easiest to just forget what we've done and do it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8828) * all over again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8829) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8830) hpsa_undo_allocations_after_kdump_soft_reset(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8831) try_soft_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8832) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8833) /* don't goto clean, we already unallocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8834) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8835)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8836) goto reinit_after_soft_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8837) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8839) /* Enable Accelerated IO path at driver layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8840) h->acciopath_status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8841) /* Disable discovery polling.*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8842) h->discovery_polling = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8845) /* Turn the interrupts on so we can service requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8846) h->access.set_intr_mask(h, HPSA_INTR_ON);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8848) hpsa_hba_inquiry(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8850) h->lastlogicals = kzalloc(sizeof(*(h->lastlogicals)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8851) if (!h->lastlogicals)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8852) dev_info(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8853) "Can't track change to report lun data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8855) /* hook into SCSI subsystem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8856) rc = hpsa_scsi_add_host(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8857) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8858) goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8859)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8860) /* Monitor the controller for firmware lockups */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8861) h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8862) INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8863) schedule_delayed_work(&h->monitor_ctlr_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8864) h->heartbeat_sample_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8865) INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8866) queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8867) h->heartbeat_sample_interval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8868) INIT_DELAYED_WORK(&h->event_monitor_work, hpsa_event_monitor_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8869) schedule_delayed_work(&h->event_monitor_work,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8870) HPSA_EVENT_MONITOR_INTERVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8873) clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8874) kfree(h->lastlogicals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8875) clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8876) hpsa_free_performant_mode(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8877) h->access.set_intr_mask(h, HPSA_INTR_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8878) clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8879) hpsa_free_sg_chain_blocks(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8880) clean5: /* cmd, irq, shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8881) hpsa_free_cmd_pool(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8882) clean4: /* irq, shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8883) hpsa_free_irqs(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8884) clean3: /* shost, pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8885) scsi_host_put(h->scsi_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8886) h->scsi_host = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8887) clean2_5: /* pci, lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8888) hpsa_free_pci_init(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8889) clean2: /* lu, aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8890) if (h->lockup_detected) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8891) free_percpu(h->lockup_detected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8892) h->lockup_detected = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8894) clean1: /* wq/aer/h */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8895) if (h->resubmit_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8896) destroy_workqueue(h->resubmit_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8897) h->resubmit_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8899) if (h->rescan_ctlr_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8900) destroy_workqueue(h->rescan_ctlr_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8901) h->rescan_ctlr_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8902) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8903) if (h->monitor_ctlr_wq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8904) destroy_workqueue(h->monitor_ctlr_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8905) h->monitor_ctlr_wq = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8907) kfree(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8908) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8909) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8910)
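/*
 * Ask the controller to flush its battery-backed write cache to disk
 * (HPSA_CACHE_FLUSH sent to the controller LUN).  Called on the
 * shutdown path so no dirty cache data is lost when power goes away.
 */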
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8911) static void hpsa_flush_cache(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8913) char *flush_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8914) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8915) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8917) if (unlikely(lockup_detected(h)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8918) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8919) flush_buf = kzalloc(4, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8920) if (!flush_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8921) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8923) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8924)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8925) if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8926) RAID_CTLR_LUNID, TYPE_CMD)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8927) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8928) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8929) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8930) DEFAULT_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8931) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8932) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8933) if (c->err_info->CommandStatus != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8934) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8935) dev_warn(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8936) "error flushing cache on controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8937) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8938) kfree(flush_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8941) /* Make controller gather fresh report lun data each time we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8942) * send down a report luns request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8944) static void hpsa_disable_rld_caching(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8946) u32 *options;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8947) struct CommandList *c;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8948) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8949)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8950) /* Don't bother trying to set diag options if locked up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8951) if (unlikely(h->lockup_detected))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8952) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8953)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8954) options = kzalloc(sizeof(*options), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8955) if (!options)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8956) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8957)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8958) c = cmd_alloc(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8960) /* first, get the current diag options settings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8961) if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8962) RAID_CTLR_LUNID, TYPE_CMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8963) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8965) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8966) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8967) if ((rc != 0) || (c->err_info->CommandStatus != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8968) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8969)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8970) /* Now, set the bit for disabling the RLD caching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8971) *options |= HPSA_DIAG_OPTS_DISABLE_RLD_CACHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8972)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8973) if (fill_cmd(c, BMIC_SET_DIAG_OPTIONS, h, options, 4, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8974) RAID_CTLR_LUNID, TYPE_CMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8975) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8977) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8978) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8979) if ((rc != 0) || (c->err_info->CommandStatus != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8980) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8981)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8982) /* Now verify that it got set: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8983) if (fill_cmd(c, BMIC_SENSE_DIAG_OPTIONS, h, options, 4, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8984) RAID_CTLR_LUNID, TYPE_CMD))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8985) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8986)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8987) rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8988) NO_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8989) if ((rc != 0) || (c->err_info->CommandStatus != 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8990) goto errout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8992) if (*options & HPSA_DIAG_OPTS_DISABLE_RLD_CACHING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8993) goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8995) errout:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8996) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8997) "Error: failed to disable report lun data caching.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8998) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8999) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9000) kfree(options);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9003) static void __hpsa_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9005) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9007) h = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9008) /* Turn board interrupts off and send the flush cache command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9009) * sendcmd will turn off interrupt, and send the flush...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9010) * To write all data in the battery backed cache to disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9012) hpsa_flush_cache(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9013) h->access.set_intr_mask(h, HPSA_INTR_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9014) hpsa_free_irqs(h); /* init_one 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9015) hpsa_disable_interrupt_mode(h); /* pci_init 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9018) static void hpsa_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9020) __hpsa_shutdown(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9021) pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9024) static void hpsa_free_device_info(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9026) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9028) for (i = 0; i < h->ndevices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9029) kfree(h->dev[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9030) h->dev[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9031) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9032) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9033)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9034) static void hpsa_remove_one(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9036) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9037) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9039) if (pci_get_drvdata(pdev) == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9040) dev_err(&pdev->dev, "unable to remove device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9041) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9043) h = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9045) /* Get rid of any controller monitoring work items */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9046) spin_lock_irqsave(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9047) h->remove_in_progress = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9048) spin_unlock_irqrestore(&h->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9049) cancel_delayed_work_sync(&h->monitor_ctlr_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9050) cancel_delayed_work_sync(&h->rescan_ctlr_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9051) cancel_delayed_work_sync(&h->event_monitor_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9052) destroy_workqueue(h->rescan_ctlr_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9053) destroy_workqueue(h->resubmit_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9054) destroy_workqueue(h->monitor_ctlr_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9056) hpsa_delete_sas_host(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9057)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9058) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9059) * Call before disabling interrupts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9060) * scsi_remove_host can trigger I/O operations especially
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9061) * when multipath is enabled. There can be SYNCHRONIZE CACHE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9062) * operations which cannot complete and will hang the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9063) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9064) if (h->scsi_host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9065) scsi_remove_host(h->scsi_host); /* init_one 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9066) /* includes hpsa_free_irqs - init_one 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9067) /* includes hpsa_disable_interrupt_mode - pci_init 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9068) __hpsa_shutdown(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9070) hpsa_free_device_info(h); /* scan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9071)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9072) kfree(h->hba_inquiry_data); /* init_one 10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9073) h->hba_inquiry_data = NULL; /* init_one 10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9074) hpsa_free_ioaccel2_sg_chain_blocks(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9075) hpsa_free_performant_mode(h); /* init_one 7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9076) hpsa_free_sg_chain_blocks(h); /* init_one 6 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9077) hpsa_free_cmd_pool(h); /* init_one 5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9078) kfree(h->lastlogicals);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9080) /* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9082) scsi_host_put(h->scsi_host); /* init_one 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9083) h->scsi_host = NULL; /* init_one 3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9085) /* includes hpsa_disable_interrupt_mode - pci_init 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9086) hpsa_free_pci_init(h); /* init_one 2.5 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9088) free_percpu(h->lockup_detected); /* init_one 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9089) h->lockup_detected = NULL; /* init_one 2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9090) /* (void) pci_disable_pcie_error_reporting(pdev); */ /* init_one 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9092) hpda_free_ctlr_info(h); /* init_one 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9095) static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9096) __attribute__((unused)) pm_message_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9097) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9098) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9099) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9101) static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9103) return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9105)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9106) static struct pci_driver hpsa_pci_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9107) .name = HPSA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9108) .probe = hpsa_init_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9109) .remove = hpsa_remove_one,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9110) .id_table = hpsa_pci_device_id, /* id_table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9111) .shutdown = hpsa_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9112) .suspend = hpsa_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9113) .resume = hpsa_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9114) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9115)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9116) /* Fill in bucket_map[], given nsgs (the max number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9117) * scatter gather elements supported) and bucket[],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9118) * which is an array of 8 integers. The bucket[] array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9119) * contains 8 different DMA transfer sizes (in 16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9120) * byte increments) which the controller uses to fetch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9121) * commands. This function fills in bucket_map[], which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9122) * maps a given number of scatter gather elements to one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9123) * the 8 DMA transfer sizes. The point of it is to allow the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9124) * controller to only do as much DMA as needed to fetch the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9125) * command, with the DMA transfer size encoded in the lower
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9126) * bits of the command address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9128) static void calc_bucket_map(int bucket[], int num_buckets,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9129) int nsgs, int min_blocks, u32 *bucket_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9131) int i, j, b, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9133) /* Note, bucket_map must have nsgs+1 entries. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9134) for (i = 0; i <= nsgs; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9135) /* Compute size of a command with i SG entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9136) size = i + min_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9137) b = num_buckets; /* Assume the biggest bucket */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9138) /* Find the bucket that is just big enough */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9139) for (j = 0; j < num_buckets; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9140) if (bucket[j] >= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9141) b = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9142) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9145) /* for a command with i SG entries, use bucket b. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9146) bucket_map[i] = b;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9148) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9150) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9151) * return -ENODEV on err, 0 on success (or no action)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9152) * allocates numerous items that must be freed later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9154) static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9156) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9157) unsigned long register_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9158) unsigned long transMethod = CFGTBL_Trans_Performant |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9159) (trans_support & CFGTBL_Trans_use_short_tags) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9160) CFGTBL_Trans_enable_directed_msix |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9161) (trans_support & (CFGTBL_Trans_io_accel1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9162) CFGTBL_Trans_io_accel2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9163) struct access_method access = SA5_performant_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9165) /* This is a bit complicated. There are 8 registers on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9166) * the controller which we write to to tell it 8 different
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9167) * sizes of commands which there may be. It's a way of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9168) * reducing the DMA done to fetch each command. Encoded into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9169) * each command's tag are 3 bits which communicate to the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9170) * which of the eight sizes that command fits within. The size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9171) * each command depends on how many scatter gather entries there are.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9172) * Each SG entry requires 16 bytes. The eight registers are programmed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9173) * with the number of 16-byte blocks a command of that size requires.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9174) * The smallest command possible requires 5 such 16 byte blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9175) * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9176) * blocks. Note, this only extends to the SG entries contained
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9177) * within the command block, and does not extend to chained blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9178) * of SG elements. bft[] contains the eight values we write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9179) * the registers. They are not evenly distributed, but have more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9180) * sizes for small commands, and fewer sizes for larger commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9182) int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9183) #define MIN_IOACCEL2_BFT_ENTRY 5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9184) #define HPSA_IOACCEL2_HEADER_SZ 4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9185) int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9186) 13, 14, 15, 16, 17, 18, 19,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9187) HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9188) BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9189) BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9190) BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9191) 16 * MIN_IOACCEL2_BFT_ENTRY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9192) BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9193) BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9194) /* 5 = 1 s/g entry or 4k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9195) * 6 = 2 s/g entry or 8k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9196) * 8 = 4 s/g entry or 16k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9197) * 10 = 6 s/g entry or 24k
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9198) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9200) /* If the controller supports either ioaccel method then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9201) * we can also use the RAID stack submit path that does not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9202) * perform the superfluous readl() after each command submission.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9203) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9204) if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9205) access = SA5_performant_access_no_read;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9207) /* Controller spec: zero out this buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9208) for (i = 0; i < h->nreply_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9209) memset(h->reply_queue[i].head, 0, h->reply_queue_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9211) bft[7] = SG_ENTRIES_IN_CMD + 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9212) calc_bucket_map(bft, ARRAY_SIZE(bft),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9213) SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9214) for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9215) writel(bft[i], &h->transtable->BlockFetch[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9217) /* size of controller ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9218) writel(h->max_commands, &h->transtable->RepQSize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9219) writel(h->nreply_queues, &h->transtable->RepQCount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9220) writel(0, &h->transtable->RepQCtrAddrLow32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9221) writel(0, &h->transtable->RepQCtrAddrHigh32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9222)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9223) for (i = 0; i < h->nreply_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9224) writel(0, &h->transtable->RepQAddr[i].upper);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9225) writel(h->reply_queue[i].busaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9226) &h->transtable->RepQAddr[i].lower);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9229) writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9230) writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9231) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9232) * enable outbound interrupt coalescing in accelerator mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9233) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9234) if (trans_support & CFGTBL_Trans_io_accel1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9235) access = SA5_ioaccel_mode1_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9236) writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9237) writel(4, &h->cfgtable->HostWrite.CoalIntCount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9238) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9239) if (trans_support & CFGTBL_Trans_io_accel2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9240) access = SA5_ioaccel_mode2_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9241) writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9242) if (hpsa_wait_for_mode_change_ack(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9243) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9244) "performant mode problem - doorbell timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9245) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9247) register_value = readl(&(h->cfgtable->TransportActive));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9248) if (!(register_value & CFGTBL_Trans_Performant)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9249) dev_err(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9250) "performant mode problem - transport not active\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9251) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9253) /* Change the access methods to the performant access methods */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9254) h->access = access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9255) h->transMethod = transMethod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9257) if (!((trans_support & CFGTBL_Trans_io_accel1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9258) (trans_support & CFGTBL_Trans_io_accel2)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9259) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9260)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9261) if (trans_support & CFGTBL_Trans_io_accel1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9262) /* Set up I/O accelerator mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9263) for (i = 0; i < h->nreply_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9264) writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9265) h->reply_queue[i].current_entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9266) readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9267) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9268) bft[7] = h->ioaccel_maxsg + 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9269) calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9270) h->ioaccel1_blockFetchTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9271)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9272) /* initialize all reply queue entries to unused */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9273) for (i = 0; i < h->nreply_queues; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9274) memset(h->reply_queue[i].head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9275) (u8) IOACCEL_MODE1_REPLY_UNUSED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9276) h->reply_queue_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9277)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9278) /* set all the constant fields in the accelerator command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9279) * frames once at init time to save CPU cycles later.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9280) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9281) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9282) struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9284) cp->function = IOACCEL1_FUNCTION_SCSIIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9285) cp->err_info = (u32) (h->errinfo_pool_dhandle +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9286) (i * sizeof(struct ErrorInfo)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9287) cp->err_info_len = sizeof(struct ErrorInfo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9288) cp->sgl_offset = IOACCEL1_SGLOFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9289) cp->host_context_flags =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9290) cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9291) cp->timeout_sec = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9292) cp->ReplyQueue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9293) cp->tag =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9294) cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9295) cp->host_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9296) cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9297) (i * sizeof(struct io_accel1_cmd)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9298) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9299) } else if (trans_support & CFGTBL_Trans_io_accel2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9300) u64 cfg_offset, cfg_base_addr_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9301) u32 bft2_offset, cfg_base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9302) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9304) rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9305) &cfg_base_addr_index, &cfg_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9306) BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9307) bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9308) calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9309) 4, h->ioaccel2_blockFetchTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9310) bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9311) BUILD_BUG_ON(offsetof(struct CfgTable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9312) io_accel_request_size_offset) != 0xb8);
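		/*
		 * The ioaccel2 block fetch table lives in the controller's
		 * config region at bft2_offset; map it and program the
		 * buckets directly.
		 */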
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9313) h->ioaccel2_bft2_regs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9314) remap_pci_mem(pci_resource_start(h->pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9315) cfg_base_addr_index) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9316) cfg_offset + bft2_offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9317) ARRAY_SIZE(bft2) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9318) sizeof(*h->ioaccel2_bft2_regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9319) for (i = 0; i < ARRAY_SIZE(bft2); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9320) writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9322) writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9323) if (hpsa_wait_for_mode_change_ack(h)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9324) dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout enabling ioaccel mode\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9326) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9328) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9331) /* Free ioaccel1 mode command blocks and block fetch table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9332) static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9334) if (h->ioaccel_cmd_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9335) dma_free_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9336) h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9337) h->ioaccel_cmd_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9338) h->ioaccel_cmd_pool_dhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9339) h->ioaccel_cmd_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9340) h->ioaccel_cmd_pool_dhandle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9342) kfree(h->ioaccel1_blockFetchTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9343) h->ioaccel1_blockFetchTable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9346) /* Allocate ioaccel1 mode command blocks and block fetch table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9347) static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9348) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9349) h->ioaccel_maxsg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9350) readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9351) if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9352) h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9353)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9354) /* Command structures must be aligned on a 128-byte boundary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9355) * because the 7 lower bits of the address are used by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9356) * hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9357) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9358) BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9359) IOACCEL1_COMMANDLIST_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9360) h->ioaccel_cmd_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9361) dma_alloc_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9362) h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9363) &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9364)
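	/*
	 * The fetch table is indexed by a command's SG entry count,
	 * so it needs one entry for every count from 0 to maxsg.
	 */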
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9365) h->ioaccel1_blockFetchTable =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9366) kmalloc(((h->ioaccel_maxsg + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9367) sizeof(u32)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9369) if ((h->ioaccel_cmd_pool == NULL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9370) (h->ioaccel1_blockFetchTable == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9371) goto clean_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9373) memset(h->ioaccel_cmd_pool, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9374) h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9375) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9376)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9377) clean_up:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9378) hpsa_free_ioaccel1_cmd_and_bft(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9379) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9381)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9382) /* Free ioaccel2 mode command blocks and block fetch table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9383) static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9384) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9385) hpsa_free_ioaccel2_sg_chain_blocks(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9387) if (h->ioaccel2_cmd_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9388) dma_free_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9389) h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9390) h->ioaccel2_cmd_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9391) h->ioaccel2_cmd_pool_dhandle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9392) h->ioaccel2_cmd_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9393) h->ioaccel2_cmd_pool_dhandle = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9395) kfree(h->ioaccel2_blockFetchTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9396) h->ioaccel2_blockFetchTable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9399) /* Allocate ioaccel2 mode command blocks and block fetch table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9400) static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9401) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9402) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9406) h->ioaccel_maxsg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9407) readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9408) if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9409) h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9410)
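	/*
	 * As with ioaccel1, every element of the DMA pool below stays
	 * hardware-aligned only if the structure size is an exact
	 * multiple of the required alignment.
	 */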
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9411) BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9412) IOACCEL2_COMMANDLIST_ALIGNMENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9413) h->ioaccel2_cmd_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9414) dma_alloc_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9415) h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9416) &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9417)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9418) h->ioaccel2_blockFetchTable =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9419) kmalloc(((h->ioaccel_maxsg + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9420) sizeof(u32)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9422) if ((h->ioaccel2_cmd_pool == NULL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9423) (h->ioaccel2_blockFetchTable == NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9424) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9425) goto clean_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9426) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9427)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9428) rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9429) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9430) goto clean_up;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9431)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9432) memset(h->ioaccel2_cmd_pool, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9433) h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9434) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9436) clean_up:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9437) hpsa_free_ioaccel2_cmd_and_bft(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9438) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9441) /* Free items allocated by hpsa_put_ctlr_into_performant_mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9442) static void hpsa_free_performant_mode(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9443) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9444) kfree(h->blockFetchTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9445) h->blockFetchTable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9446) hpsa_free_reply_queues(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9447) hpsa_free_ioaccel1_cmd_and_bft(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9448) hpsa_free_ioaccel2_cmd_and_bft(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9449) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9450)
/*
 * Returns 0 on success (or when no action is needed) and a negative
 * errno on failure; allocates numerous items that must be freed later
 * by hpsa_free_performant_mode().
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9454) static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9455) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9456) u32 trans_support;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9457) unsigned long transMethod = CFGTBL_Trans_Performant |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9458) CFGTBL_Trans_use_short_tags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9459) int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9461) if (hpsa_simple_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9462) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9464) trans_support = readl(&(h->cfgtable->TransportSupport));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9465) if (!(trans_support & PERFORMANT_MODE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9466) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9468) /* Check for I/O accelerator mode support */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9469) if (trans_support & CFGTBL_Trans_io_accel1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9470) transMethod |= CFGTBL_Trans_io_accel1 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9471) CFGTBL_Trans_enable_directed_msix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9472) rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9473) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9474) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9475) } else if (trans_support & CFGTBL_Trans_io_accel2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9476) transMethod |= CFGTBL_Trans_io_accel2 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9477) CFGTBL_Trans_enable_directed_msix;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9478) rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9479) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9480) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9483) h->nreply_queues = h->msix_vectors > 0 ? h->msix_vectors : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9484) hpsa_get_max_perf_mode_cmds(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9485) /* Performant mode ring buffer and supporting data structures */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9486) h->reply_queue_size = h->max_commands * sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9488) for (i = 0; i < h->nreply_queues; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9489) h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9490) h->reply_queue_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9491) &h->reply_queue[i].busaddr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9492) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9493) if (!h->reply_queue[i].head) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9494) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9495) goto clean1; /* rq, ioaccel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9496) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9497) h->reply_queue[i].size = h->max_commands;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9498) h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9499) h->reply_queue[i].current_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9500) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9501)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9502) /* Need a block fetch table for performant mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9503) h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9504) sizeof(u32)), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9505) if (!h->blockFetchTable) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9506) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9507) goto clean1; /* rq, ioaccel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9510) rc = hpsa_enter_performant_mode(h, trans_support);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9511) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9512) goto clean2; /* bft, rq, ioaccel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9513) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9515) clean2: /* bft, rq, ioaccel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9516) kfree(h->blockFetchTable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9517) h->blockFetchTable = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9518) clean1: /* rq, ioaccel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9519) hpsa_free_reply_queues(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9520) hpsa_free_ioaccel1_cmd_and_bft(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9521) hpsa_free_ioaccel2_cmd_and_bft(h);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9522) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9524)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9525) static int is_accelerated_cmd(struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9527) return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9528) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9530) static void hpsa_drain_accel_commands(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9532) struct CommandList *c = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9533) int i, accel_cmds_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9534) int refcount;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9536) do { /* wait for all outstanding ioaccel commands to drain out */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9537) accel_cmds_out = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9538) for (i = 0; i < h->nr_cmds; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9539) c = h->cmd_pool + i;
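			/*
			 * Take a temporary reference; an inc_return value
			 * above 1 means someone else already held the
			 * command, i.e. it is still outstanding.
			 * cmd_free() below drops our temporary reference.
			 */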
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9540) refcount = atomic_inc_return(&c->refcount);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9541) if (refcount > 1) /* Command is allocated */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9542) accel_cmds_out += is_accelerated_cmd(c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9543) cmd_free(h, c);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9545) if (accel_cmds_out <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9546) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9547) msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9548) } while (1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9549) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9550)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9551) static struct hpsa_sas_phy *hpsa_alloc_sas_phy(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9552) struct hpsa_sas_port *hpsa_sas_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9554) struct hpsa_sas_phy *hpsa_sas_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9555) struct sas_phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9556)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9557) hpsa_sas_phy = kzalloc(sizeof(*hpsa_sas_phy), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9558) if (!hpsa_sas_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9559) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9560)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9561) phy = sas_phy_alloc(hpsa_sas_port->parent_node->parent_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9562) hpsa_sas_port->next_phy_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9563) if (!phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9564) kfree(hpsa_sas_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9565) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9568) hpsa_sas_port->next_phy_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9569) hpsa_sas_phy->phy = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9570) hpsa_sas_phy->parent_port = hpsa_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9571)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9572) return hpsa_sas_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9574)
static void hpsa_free_sas_phy(struct hpsa_sas_phy *hpsa_sas_phy)
{
	struct sas_phy *phy = hpsa_sas_phy->phy;

	if (hpsa_sas_phy->added_to_port) {
		/* sas_phy_delete() tears down a phy that was added */
		sas_port_delete_phy(hpsa_sas_phy->parent_port->port, phy);
		list_del(&hpsa_sas_phy->phy_list_entry);
		sas_phy_delete(phy);
	} else {
		/* a phy that was never added must only be freed */
		sas_phy_free(phy);
	}
	kfree(hpsa_sas_phy);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9586) static int hpsa_sas_port_add_phy(struct hpsa_sas_phy *hpsa_sas_phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9588) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9589) struct hpsa_sas_port *hpsa_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9590) struct sas_phy *phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9591) struct sas_identify *identify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9592)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9593) hpsa_sas_port = hpsa_sas_phy->parent_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9594) phy = hpsa_sas_phy->phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9596) identify = &phy->identify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9597) memset(identify, 0, sizeof(*identify));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9598) identify->sas_address = hpsa_sas_port->sas_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9599) identify->device_type = SAS_END_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9600) identify->initiator_port_protocols = SAS_PROTOCOL_STP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9601) identify->target_port_protocols = SAS_PROTOCOL_STP;
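	/*
	 * The driver has no way to query the physical link, so all
	 * link rates are reported as unknown.
	 */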
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9602) phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9603) phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9604) phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9605) phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9606) phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9607)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9608) rc = sas_phy_add(hpsa_sas_phy->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9609) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9610) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9611)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9612) sas_port_add_phy(hpsa_sas_port->port, hpsa_sas_phy->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9613) list_add_tail(&hpsa_sas_phy->phy_list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9614) &hpsa_sas_port->phy_list_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9615) hpsa_sas_phy->added_to_port = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9616)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9617) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9620) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9621) hpsa_sas_port_add_rphy(struct hpsa_sas_port *hpsa_sas_port,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9622) struct sas_rphy *rphy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9624) struct sas_identify *identify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9626) identify = &rphy->identify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9627) identify->sas_address = hpsa_sas_port->sas_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9628) identify->initiator_port_protocols = SAS_PROTOCOL_STP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9629) identify->target_port_protocols = SAS_PROTOCOL_STP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9631) return sas_rphy_add(rphy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9633)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9634) static struct hpsa_sas_port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9635) *hpsa_alloc_sas_port(struct hpsa_sas_node *hpsa_sas_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9636) u64 sas_address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9638) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9639) struct hpsa_sas_port *hpsa_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9640) struct sas_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9642) hpsa_sas_port = kzalloc(sizeof(*hpsa_sas_port), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9643) if (!hpsa_sas_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9644) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9645)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9646) INIT_LIST_HEAD(&hpsa_sas_port->phy_list_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9647) hpsa_sas_port->parent_node = hpsa_sas_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9648)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9649) port = sas_port_alloc_num(hpsa_sas_node->parent_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9650) if (!port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9651) goto free_hpsa_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9653) rc = sas_port_add(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9654) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9655) goto free_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9656)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9657) hpsa_sas_port->port = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9658) hpsa_sas_port->sas_address = sas_address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9659) list_add_tail(&hpsa_sas_port->port_list_entry,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9660) &hpsa_sas_node->port_list_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9661)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9662) return hpsa_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9664) free_sas_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9665) sas_port_free(port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9666) free_hpsa_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9667) kfree(hpsa_sas_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9668)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9669) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9670) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9672) static void hpsa_free_sas_port(struct hpsa_sas_port *hpsa_sas_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9673) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9674) struct hpsa_sas_phy *hpsa_sas_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9675) struct hpsa_sas_phy *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9676)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9677) list_for_each_entry_safe(hpsa_sas_phy, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9678) &hpsa_sas_port->phy_list_head, phy_list_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9679) hpsa_free_sas_phy(hpsa_sas_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9681) sas_port_delete(hpsa_sas_port->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9682) list_del(&hpsa_sas_port->port_list_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9683) kfree(hpsa_sas_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9686) static struct hpsa_sas_node *hpsa_alloc_sas_node(struct device *parent_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9688) struct hpsa_sas_node *hpsa_sas_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9689)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9690) hpsa_sas_node = kzalloc(sizeof(*hpsa_sas_node), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9691) if (hpsa_sas_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9692) hpsa_sas_node->parent_dev = parent_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9693) INIT_LIST_HEAD(&hpsa_sas_node->port_list_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9694) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9695)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9696) return hpsa_sas_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9697) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9699) static void hpsa_free_sas_node(struct hpsa_sas_node *hpsa_sas_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9701) struct hpsa_sas_port *hpsa_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9702) struct hpsa_sas_port *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9703)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9704) if (!hpsa_sas_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9705) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9707) list_for_each_entry_safe(hpsa_sas_port, next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9708) &hpsa_sas_node->port_list_head, port_list_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9709) hpsa_free_sas_port(hpsa_sas_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9710)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9711) kfree(hpsa_sas_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9713)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9714) static struct hpsa_scsi_dev_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9715) *hpsa_find_device_by_sas_rphy(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9716) struct sas_rphy *rphy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9717) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9718) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9719) struct hpsa_scsi_dev_t *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9720)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9721) for (i = 0; i < h->ndevices; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9722) device = h->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9723) if (!device->sas_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9724) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9725) if (device->sas_port->rphy == rphy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9726) return device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9728)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9729) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9730) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9732) static int hpsa_add_sas_host(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9734) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9735) struct device *parent_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9736) struct hpsa_sas_node *hpsa_sas_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9737) struct hpsa_sas_port *hpsa_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9738) struct hpsa_sas_phy *hpsa_sas_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9740) parent_dev = &h->scsi_host->shost_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9742) hpsa_sas_node = hpsa_alloc_sas_node(parent_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9743) if (!hpsa_sas_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9744) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9745)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9746) hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, h->sas_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9747) if (!hpsa_sas_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9748) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9749) goto free_sas_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9751)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9752) hpsa_sas_phy = hpsa_alloc_sas_phy(hpsa_sas_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9753) if (!hpsa_sas_phy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9754) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9755) goto free_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9758) rc = hpsa_sas_port_add_phy(hpsa_sas_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9759) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9760) goto free_sas_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9761)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9762) h->sas_host = hpsa_sas_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9763)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9764) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9766) free_sas_phy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9767) hpsa_free_sas_phy(hpsa_sas_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9768) free_sas_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9769) hpsa_free_sas_port(hpsa_sas_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9770) free_sas_node:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9771) hpsa_free_sas_node(hpsa_sas_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9773) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9776) static void hpsa_delete_sas_host(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9777) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9778) hpsa_free_sas_node(h->sas_host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9781) static int hpsa_add_sas_device(struct hpsa_sas_node *hpsa_sas_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9782) struct hpsa_scsi_dev_t *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9784) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9785) struct hpsa_sas_port *hpsa_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9786) struct sas_rphy *rphy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9787)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9788) hpsa_sas_port = hpsa_alloc_sas_port(hpsa_sas_node, device->sas_address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9789) if (!hpsa_sas_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9790) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9792) rphy = sas_end_device_alloc(hpsa_sas_port->port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9793) if (!rphy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9794) rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9795) goto free_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9796) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9798) hpsa_sas_port->rphy = rphy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9799) device->sas_port = hpsa_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9800)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9801) rc = hpsa_sas_port_add_rphy(hpsa_sas_port, rphy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9802) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9803) goto free_sas_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9804)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9805) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9806)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9807) free_sas_port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9808) hpsa_free_sas_port(hpsa_sas_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9809) device->sas_port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9810)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9811) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9814) static void hpsa_remove_sas_device(struct hpsa_scsi_dev_t *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9816) if (device->sas_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9817) hpsa_free_sas_port(device->sas_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9818) device->sas_port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9821)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9822) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9823) hpsa_sas_get_linkerrors(struct sas_phy *phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9825) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9828) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9829) hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9831) struct Scsi_Host *shost = phy_to_shost(rphy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9832) struct ctlr_info *h;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9833) struct hpsa_scsi_dev_t *sd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9834)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9835) if (!shost)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9836) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9837)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9838) h = shost_to_hba(shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9839)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9840) if (!h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9841) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9843) sd = hpsa_find_device_by_sas_rphy(h, rphy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9844) if (!sd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9845) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9847) *identifier = sd->eli;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9849) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9851)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9852) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9853) hpsa_sas_get_bay_identifier(struct sas_rphy *rphy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9855) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9856) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9858) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9859) hpsa_sas_phy_reset(struct sas_phy *phy, int hard_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9861) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9863)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9864) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9865) hpsa_sas_phy_enable(struct sas_phy *phy, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9867) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9870) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9871) hpsa_sas_phy_setup(struct sas_phy *phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9873) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9876) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9877) hpsa_sas_phy_release(struct sas_phy *phy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9879) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9881) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9882) hpsa_sas_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9884) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9886)
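/*
 * Most of these handlers are stubs: the controller firmware manages the
 * phys itself, so the driver implements just enough of the SAS transport
 * interface to export its topology through sysfs.
 */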
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9887) static struct sas_function_template hpsa_sas_transport_functions = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9888) .get_linkerrors = hpsa_sas_get_linkerrors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9889) .get_enclosure_identifier = hpsa_sas_get_enclosure_identifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9890) .get_bay_identifier = hpsa_sas_get_bay_identifier,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9891) .phy_reset = hpsa_sas_phy_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9892) .phy_enable = hpsa_sas_phy_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9893) .phy_setup = hpsa_sas_phy_setup,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9894) .phy_release = hpsa_sas_phy_release,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9895) .set_phy_speed = hpsa_sas_phy_speed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9896) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9897)
/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one of
 * our cards.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9902) static int __init hpsa_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9904) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9905)
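	/*
	 * The SAS transport template must be in place before the PCI
	 * driver registers: controller probe, which uses the template,
	 * can run as soon as pci_register_driver() returns.
	 */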
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9906) hpsa_sas_transport_template =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9907) sas_attach_transport(&hpsa_sas_transport_functions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9908) if (!hpsa_sas_transport_template)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9909) return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9911) rc = pci_register_driver(&hpsa_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9912)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9913) if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9914) sas_release_transport(hpsa_sas_transport_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9916) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9918)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9919) static void __exit hpsa_cleanup(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9921) pci_unregister_driver(&hpsa_pci_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9922) sas_release_transport(hpsa_sas_transport_template);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9924)
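/*
 * Compile-time layout checks: these structures are shared with controller
 * firmware, so every member must sit at the offset the hardware expects.
 * BUILD_BUG_ON() turns any drift into a build failure.
 */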
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9925) static void __attribute__((unused)) verify_offsets(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9927) #define VERIFY_OFFSET(member, offset) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9928) BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9930) VERIFY_OFFSET(structure_size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9931) VERIFY_OFFSET(volume_blk_size, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9932) VERIFY_OFFSET(volume_blk_cnt, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9933) VERIFY_OFFSET(phys_blk_shift, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9934) VERIFY_OFFSET(parity_rotation_shift, 17);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9935) VERIFY_OFFSET(strip_size, 18);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9936) VERIFY_OFFSET(disk_starting_blk, 20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9937) VERIFY_OFFSET(disk_blk_cnt, 28);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9938) VERIFY_OFFSET(data_disks_per_row, 36);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9939) VERIFY_OFFSET(metadata_disks_per_row, 38);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9940) VERIFY_OFFSET(row_cnt, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9941) VERIFY_OFFSET(layout_map_count, 42);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9942) VERIFY_OFFSET(flags, 44);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9943) VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9945) VERIFY_OFFSET(data, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9947) #undef VERIFY_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9949) #define VERIFY_OFFSET(member, offset) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9950) BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9952) VERIFY_OFFSET(IU_type, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9953) VERIFY_OFFSET(direction, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9954) VERIFY_OFFSET(reply_queue, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9955) /* VERIFY_OFFSET(reserved1, 3); */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9956) VERIFY_OFFSET(scsi_nexus, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9957) VERIFY_OFFSET(Tag, 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9958) VERIFY_OFFSET(cdb, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9959) VERIFY_OFFSET(cciss_lun, 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9960) VERIFY_OFFSET(data_len, 40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9961) VERIFY_OFFSET(cmd_priority_task_attr, 44);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9962) VERIFY_OFFSET(sg_count, 45);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9963) /* VERIFY_OFFSET(reserved3 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9964) VERIFY_OFFSET(err_ptr, 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9965) VERIFY_OFFSET(err_len, 56);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9966) /* VERIFY_OFFSET(reserved4 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9967) VERIFY_OFFSET(sg, 64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9969) #undef VERIFY_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9971) #define VERIFY_OFFSET(member, offset) \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9972) BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9974) VERIFY_OFFSET(dev_handle, 0x00);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9975) VERIFY_OFFSET(reserved1, 0x02);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9976) VERIFY_OFFSET(function, 0x03);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9977) VERIFY_OFFSET(reserved2, 0x04);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9978) VERIFY_OFFSET(err_info, 0x0C);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9979) VERIFY_OFFSET(reserved3, 0x10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9980) VERIFY_OFFSET(err_info_len, 0x12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9981) VERIFY_OFFSET(reserved4, 0x13);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9982) VERIFY_OFFSET(sgl_offset, 0x14);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9983) VERIFY_OFFSET(reserved5, 0x15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9984) VERIFY_OFFSET(transfer_len, 0x1C);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9985) VERIFY_OFFSET(reserved6, 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9986) VERIFY_OFFSET(io_flags, 0x24);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9987) VERIFY_OFFSET(reserved7, 0x26);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9988) VERIFY_OFFSET(LUN, 0x34);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9989) VERIFY_OFFSET(control, 0x3C);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9990) VERIFY_OFFSET(CDB, 0x40);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9991) VERIFY_OFFSET(reserved8, 0x50);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9992) VERIFY_OFFSET(host_context_flags, 0x60);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9993) VERIFY_OFFSET(timeout_sec, 0x62);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9994) VERIFY_OFFSET(ReplyQueue, 0x64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9995) VERIFY_OFFSET(reserved9, 0x65);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9996) VERIFY_OFFSET(tag, 0x68);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9997) VERIFY_OFFSET(host_addr, 0x70);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9998) VERIFY_OFFSET(CISS_LUN, 0x78);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9999) VERIFY_OFFSET(SG, 0x78 + 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10000) #undef VERIFY_OFFSET
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10003) module_init(hpsa_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10004) module_exit(hpsa_cleanup);