Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2)  *    Disk Array driver for HP Smart Array SAS controllers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  *    Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *    Copyright 2016 Microsemi Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *    Copyright 2014-2015 PMC-Sierra, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  *    Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *    This program is free software; you can redistribute it and/or modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  *    it under the terms of the GNU General Public License as published by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *    the Free Software Foundation; version 2 of the License.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *    This program is distributed in the hope that it will be useful,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  *    but WITHOUT ANY WARRANTY; without even the implied warranty of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  *    NON INFRINGEMENT.  See the GNU General Public License for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #ifndef HPSA_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #define HPSA_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <scsi/scsicam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #define IO_OK		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #define IO_ERROR	1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) struct ctlr_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
/*
 * Function-pointer table abstracting register-level access to a
 * controller; filled in per board/interrupt mode.
 */
struct access_method {
	/* Post a command's bus address to the controller's request port. */
	void (*submit_command)(struct ctlr_info *h,
		struct CommandList *c);
	/* Mask or unmask controller interrupts; val selects the state. */
	void (*set_intr_mask)(struct ctlr_info *h, unsigned long val);
	/* Return true if the controller has an interrupt pending. */
	bool (*intr_pending)(struct ctlr_info *h);
	/* Fetch one completed command tag from reply queue q. */
	unsigned long (*command_completed)(struct ctlr_info *h, u8 q);
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
/* for SAS hosts and SAS expanders */
struct hpsa_sas_node {
	struct device *parent_dev;	 /* device this SAS node hangs off of */
	struct list_head port_list_head; /* list of hpsa_sas_port on this node */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
/* One SAS port belonging to an hpsa_sas_node; owns a list of phys. */
struct hpsa_sas_port {
	struct list_head port_list_entry; /* entry in parent node's port_list_head */
	u64 sas_address;		/* SAS address of this port */
	struct sas_port *port;		/* SCSI SAS transport-class port object */
	int next_phy_index;		/* presumably index for the next phy added — verify */
	struct list_head phy_list_head;	/* list of hpsa_sas_phy on this port */
	struct hpsa_sas_node *parent_node; /* node (host/expander) owning this port */
	struct sas_rphy *rphy;		/* remote phy attached to this port */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
/* One phy on an hpsa_sas_port. */
struct hpsa_sas_phy {
	struct list_head phy_list_entry; /* entry in parent port's phy_list_head */
	struct sas_phy *phy;		/* SCSI SAS transport-class phy object */
	struct hpsa_sas_port *parent_port; /* port this phy belongs to */
	bool added_to_port;		/* phy has been added to parent port */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) 
#define EXTERNAL_QD 128	/* queue depth for external-array devices — TODO confirm */
/*
 * Per-device state for each logical or physical device the driver
 * tracks on a controller.
 */
struct hpsa_scsi_dev_t {
	unsigned int devtype;		/* SCSI peripheral device type */
	int bus, target, lun;		/* as presented to the OS */
	unsigned char scsi3addr[8];	/* as presented to the HW */
	u8 physical_device : 1;		/* 1: physical device, 0: logical volume */
	u8 expose_device;		/* nonzero: present device to the SCSI midlayer */
	u8 removed : 1;			/* device is marked for death */
	u8 was_removed : 1;		/* device actually removed */
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
	unsigned char device_id[16];    /* from inquiry pg. 0x83 */
	u64 sas_address;		/* SAS address, when the device has one */
	u64 eli;			/* from report diags. */
	unsigned char vendor[8];        /* bytes 8-15 of inquiry data */
	unsigned char model[16];        /* bytes 16-31 of inquiry data */
	unsigned char rev;		/* byte 2 of inquiry data */
	unsigned char raid_level;	/* from inquiry page 0xC1 */
	unsigned char volume_offline;	/* discovered via TUR or VPD */
	u16 queue_depth;		/* max queue_depth for this device */
	atomic_t commands_outstanding;	/* track commands sent to device */
	atomic_t ioaccel_cmds_out;	/* Only used for physical devices
					 * counts commands sent to physical
					 * device via "ioaccel" path.
					 */
	bool in_reset;			/* a reset targeting this device is in flight */
	u32 ioaccel_handle;		/* firmware handle used on the ioaccel path */
	u8 active_path_index;		/* currently active path — TODO confirm semantics */
	u8 path_map;			/* bitmap of available paths — TODO confirm */
	u8 bay;				/* enclosure bay number — TODO confirm */
	u8 box[8];			/* per-path enclosure box info — TODO confirm */
	u16 phys_connector[8];		/* per-path physical connector info */
	int offload_config;		/* I/O accel RAID offload configured */
	int offload_enabled;		/* I/O accel RAID offload enabled */
	int offload_to_be_enabled;	/* offload should be enabled after update */
	int hba_ioaccel_enabled;	/* ioaccel enabled for HBA-mode device */
	int offload_to_mirror;		/* Send next I/O accelerator RAID
					 * offload request to mirror drive
					 */
	struct raid_map_data raid_map;	/* I/O accelerator RAID map */

	/*
	 * Pointers from logical drive map indices to the phys drives that
	 * make those logical drives.  Note, multiple logical drives may
	 * share physical drives.  You can have for instance 5 physical
	 * drives with 3 logical drives each using those same 5 physical
	 * disks. We need these pointers for counting i/o's out to physical
	 * devices in order to honor physical device queue depth limits.
	 */
	struct hpsa_scsi_dev_t *phys_disk[RAID_MAP_MAX_ENTRIES];
	int nphysical_disks;		/* number of valid entries in phys_disk[] */
	int supports_aborts;		/* device supports abort TMFs — TODO confirm */
	struct hpsa_sas_port *sas_port;	/* SAS transport port, when registered */
	int external;   /* 1-from external array 0-not <0-unknown */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
/*
 * One performant-mode reply (completion) queue: a host-allocated,
 * DMA-addressable ring the controller deposits completed tags into.
 */
struct reply_queue_buffer {
	u64 *head;		/* ring buffer of completion entries */
	size_t size;		/* allocated size of the ring */
	u8 wraparound;		/* wrap state of the ring — TODO confirm toggle semantics */
	u32 current_entry;	/* next entry to consume */
	dma_addr_t busaddr;	/* DMA address handed to the controller */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
#pragma pack(1)
/*
 * BMIC sense/set-controller-parameters buffer.  Packed to byte
 * granularity to match the firmware's on-wire layout exactly —
 * do not reorder fields or remove the pack pragma.
 */
struct bmic_controller_parameters {
	u8   led_flags;
	u8   enable_command_list_verification;
	u8   backed_out_write_drives;
	u16  stripes_for_parity;
	u8   parity_distribution_mode_flags;
	u16  max_driver_requests;
	u16  elevator_trend_count;
	u8   disable_elevator;
	u8   force_scan_complete;
	u8   scsi_transfer_mode;
	u8   force_narrow;
	u8   rebuild_priority;
	u8   expand_priority;
	u8   host_sdb_asic_fix;
	u8   pdpi_burst_from_host_disabled;
	char software_name[64];
	char hardware_name[32];
	u8   bridge_revision;
	u8   snapshot_priority;
	u32  os_specific;
	u8   post_prompt_timeout;
	u8   automatic_drive_slamming;
	u8   reserved1;
	u8   nvram_flags;
	u8   cache_nvram_flags;
	u8   drive_config_flags;
	u16  reserved2;
	u8   temp_warning_level;
	u8   temp_shutdown_level;
	u8   temp_condition_reset;
	u8   max_coalesce_commands;
	u32  max_coalesce_delay;
	u8   orca_password[4];
	u8   access_id[16];
	u8   reserved[356];	/* pad to the size the firmware expects */
};
#pragma pack()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 
/*
 * Per-controller state; one instance for each Smart Array board the
 * driver manages.
 */
struct ctlr_info {
	unsigned int *reply_map;	/* presumably maps CPU to reply queue — verify */
	int	ctlr;			/* controller number */
	char	devname[8];		/* controller device name */
	char    *product_name;		/* board product name string */
	struct pci_dev *pdev;		/* underlying PCI device */
	u32	board_id;		/* board identifier — TODO confirm source (PCI IDs?) */
	u64	sas_address;		/* SAS address of the controller */
	void __iomem *vaddr;		/* mapped register space */
	unsigned long paddr;		/* physical address of register space */
	int 	nr_cmds; /* Number of commands allowed on this controller */
#define HPSA_CMDS_RESERVED_FOR_ABORTS 2
#define HPSA_CMDS_RESERVED_FOR_DRIVER 1
	struct CfgTable __iomem *cfgtable;	/* controller config table (MMIO) */
	int	interrupts_enabled;	/* nonzero when interrupts are unmasked */
	int 	max_commands;		/* command limit — TODO confirm vs. nr_cmds */
	int	last_collision_tag; /* tags are global */
	atomic_t commands_outstanding;	/* commands currently outstanding on the board */
#	define PERF_MODE_INT	0
#	define DOORBELL_INT	1
#	define SIMPLE_MODE_INT	2
#	define MEMQ_MODE_INT	3
	unsigned int msix_vectors;	/* number of MSI-X vectors in use */
	int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
	struct access_method access;	/* register-access methods for this board */

	/* queue and queue Info */
	unsigned int Qdepth;		/* current driver queue depth */
	unsigned int maxSG;		/* max scatter-gather count — TODO confirm */
	spinlock_t lock;
	int maxsgentries;		/* max s/g entries per command */
	u8 max_cmd_sg_entries;		/* s/g entries embedded in a command */
	int chainsize;			/* entries in a chained s/g block */
	struct SGDescriptor **cmd_sg_list;	/* per-command chained s/g areas */
	struct ioaccel2_sg_element **ioaccel2_cmd_sg_list; /* ditto, ioaccel2 path */

	/* pointers to command and error info pool */
	struct CommandList 	*cmd_pool;
	dma_addr_t		cmd_pool_dhandle;	/* DMA handle of cmd_pool */
	struct io_accel1_cmd	*ioaccel_cmd_pool;
	dma_addr_t		ioaccel_cmd_pool_dhandle;
	struct io_accel2_cmd	*ioaccel2_cmd_pool;
	dma_addr_t		ioaccel2_cmd_pool_dhandle;
	struct ErrorInfo 	*errinfo_pool;
	dma_addr_t		errinfo_pool_dhandle;
	unsigned long  		*cmd_pool_bits;	/* bitmap of in-use cmd_pool slots */
	int			scan_finished;	/* nonzero once a device scan completed */
	u8			scan_waiting : 1; /* a scan request is waiting */
	spinlock_t		scan_lock;	/* protects scan_finished/scan_waiting */
	wait_queue_head_t	scan_wait_queue; /* waiters for scan completion */

	struct Scsi_Host *scsi_host;	/* SCSI midlayer host for this controller */
	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
	int ndevices; /* number of used elements in .dev[] array. */
	struct hpsa_scsi_dev_t *dev[HPSA_MAX_DEVICES];
	/*
	 * Performant mode tables.
	 */
	u32 trans_support;
	u32 trans_offset;
	struct TransTable_struct __iomem *transtable;
	unsigned long transMethod;

	/* cap concurrent passthrus at some reasonable maximum */
#define HPSA_MAX_CONCURRENT_PASSTHRUS (10)
	atomic_t passthru_cmds_avail;	/* remaining passthru slots */

	/*
	 * Performant mode completion buffers
	 */
	size_t reply_queue_size;
	struct reply_queue_buffer reply_queue[MAX_REPLY_QUEUES];
	u8 nreply_queues;	/* number of reply queues actually in use */
	u32 *blockFetchTable;
	u32 *ioaccel1_blockFetchTable;
	u32 *ioaccel2_blockFetchTable;
	u32 __iomem *ioaccel2_bft2_regs;
	unsigned char *hba_inquiry_data;	/* cached inquiry data — TODO confirm */
	u32 driver_support;
	u32 fw_support;
	int ioaccel_support;
	int ioaccel_maxsg;
	u64 last_intr_timestamp;	/* time of last interrupt — TODO confirm units */
	u32 last_heartbeat;		/* last heartbeat value sampled */
	u64 last_heartbeat_timestamp;	/* when last_heartbeat was sampled */
	u32 heartbeat_sample_interval;	/* interval between heartbeat samples */
	atomic_t firmware_flash_in_progress;
	u32 __percpu *lockup_detected;	/* per-cpu flag: controller lockup detected */
	struct delayed_work monitor_ctlr_work;	/* periodic controller health check */
	struct delayed_work rescan_ctlr_work;	/* periodic device rescan */
	struct delayed_work event_monitor_work;	/* periodic event polling */
	int remove_in_progress;		/* driver teardown underway */
	/* Address of h->q[x] is passed to intr handler to know which queue */
	u8 q[MAX_REPLY_QUEUES];
	char intrname[MAX_REPLY_QUEUES][16];	/* "hpsa0-msix00" names */
	u32 TMFSupportFlags; /* cache what task mgmt funcs are supported. */
#define HPSATMF_BITS_SUPPORTED  (1 << 0)
#define HPSATMF_PHYS_LUN_RESET  (1 << 1)
#define HPSATMF_PHYS_NEX_RESET  (1 << 2)
#define HPSATMF_PHYS_TASK_ABORT (1 << 3)
#define HPSATMF_PHYS_TSET_ABORT (1 << 4)
#define HPSATMF_PHYS_CLEAR_ACA  (1 << 5)
#define HPSATMF_PHYS_CLEAR_TSET (1 << 6)
#define HPSATMF_PHYS_QRY_TASK   (1 << 7)
#define HPSATMF_PHYS_QRY_TSET   (1 << 8)
#define HPSATMF_PHYS_QRY_ASYNC  (1 << 9)
#define HPSATMF_IOACCEL_ENABLED (1 << 15)
#define HPSATMF_MASK_SUPPORTED  (1 << 16)
#define HPSATMF_LOG_LUN_RESET   (1 << 17)
#define HPSATMF_LOG_NEX_RESET   (1 << 18)
#define HPSATMF_LOG_TASK_ABORT  (1 << 19)
#define HPSATMF_LOG_TSET_ABORT  (1 << 20)
#define HPSATMF_LOG_CLEAR_ACA   (1 << 21)
#define HPSATMF_LOG_CLEAR_TSET  (1 << 22)
#define HPSATMF_LOG_QRY_TASK    (1 << 23)
#define HPSATMF_LOG_QRY_TSET    (1 << 24)
#define HPSATMF_LOG_QRY_ASYNC   (1 << 25)
	u32 events;	/* controller event notification bits */
#define CTLR_STATE_CHANGE_EVENT				(1 << 0)
#define CTLR_ENCLOSURE_HOT_PLUG_EVENT			(1 << 1)
#define CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV		(1 << 4)
#define CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV		(1 << 5)
#define CTLR_STATE_CHANGE_EVENT_REDUNDANT_CNTRL		(1 << 6)
#define CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED	(1 << 30)
#define CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE	(1 << 31)

/* Events whose arrival forces a device rescan. */
#define RESCAN_REQUIRED_EVENT_BITS \
		(CTLR_ENCLOSURE_HOT_PLUG_EVENT | \
		CTLR_STATE_CHANGE_EVENT_PHYSICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_LOGICAL_DRV | \
		CTLR_STATE_CHANGE_EVENT_AIO_ENABLED_DISABLED | \
		CTLR_STATE_CHANGE_EVENT_AIO_CONFIG_CHANGE)
	spinlock_t offline_device_lock;	/* protects offline_device_list */
	struct list_head offline_device_list;	/* volumes discovered offline */
	int	acciopath_status;	/* ioaccel path status — TODO confirm semantics */
	int	drv_req_rescan;		/* driver-requested rescan pending */
	int	raid_offload_debug;	/* debug level for RAID offload */
	int     discovery_polling;	/* poll-based device discovery — TODO confirm */
	int     legacy_board;		/* board is a legacy model — TODO confirm */
	struct  ReportLUNdata *lastlogicals;	/* cached last logical-LUN report */
	int	needs_abort_tags_swizzled;	/* firmware expects swizzled abort tags */
	struct workqueue_struct *resubmit_wq;	/* wq for command resubmission */
	struct workqueue_struct *rescan_ctlr_wq;	/* wq running rescan_ctlr_work */
	struct workqueue_struct *monitor_ctlr_wq;	/* wq running monitor_ctlr_work */
	atomic_t abort_cmds_available;	/* remaining reserved abort-command slots */
	wait_queue_head_t event_sync_wait_queue;
	struct mutex reset_mutex;	/* serializes resets */
	u8 reset_in_progress;		/* a reset is currently running */
	struct hpsa_sas_node *sas_host;	/* SAS transport node for this host */
	spinlock_t reset_lock;		/* protects reset_in_progress */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) 
/* Tracks a volume found offline; linked on ctlr_info.offline_device_list. */
struct offline_device_entry {
	unsigned char scsi3addr[8];	/* address of the offline device */
	struct list_head offline_list;	/* entry in offline_device_list */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) 
/* Message types and reset scopes for controller messages, plus retry policy. */
#define HPSA_ABORT_MSG 0
#define HPSA_DEVICE_RESET_MSG 1
#define HPSA_RESET_TYPE_CONTROLLER 0x00
#define HPSA_RESET_TYPE_BUS 0x01
#define HPSA_RESET_TYPE_LUN 0x04
#define HPSA_PHYS_TARGET_RESET 0x99 /* not defined by cciss spec */
#define HPSA_MSG_SEND_RETRY_LIMIT 10
#define HPSA_MSG_SEND_RETRY_INTERVAL_MSECS (10000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) /* Maximum time in seconds driver will wait for command completions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331)  * when polling before giving up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) #define HPSA_MAX_POLL_TIME_SECS (20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) /* During SCSI error recovery, HPSA_TUR_RETRY_LIMIT defines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336)  * how many times to retry TEST UNIT READY on a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337)  * while waiting for it to become ready before giving up.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)  * HPSA_MAX_WAIT_INTERVAL_SECS is the max wait interval
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339)  * between sending TURs while waiting for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340)  * to become ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) #define HPSA_TUR_RETRY_LIMIT (20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) #define HPSA_MAX_WAIT_INTERVAL_SECS (30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) /* HPSA_BOARD_READY_WAIT_SECS is how long to wait for a board
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)  * to become ready, in seconds, before giving up on it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347)  * HPSA_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348)  * between polling the board to see if it is ready, in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349)  * milliseconds.  HPSA_BOARD_READY_POLL_INTERVAL and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350)  * HPSA_BOARD_READY_ITERATIONS are derived from those.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) #define HPSA_BOARD_READY_WAIT_SECS (120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) #define HPSA_BOARD_NOT_READY_WAIT_SECS (100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) #define HPSA_BOARD_READY_POLL_INTERVAL_MSECS (100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) #define HPSA_BOARD_READY_POLL_INTERVAL \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) 	((HPSA_BOARD_READY_POLL_INTERVAL_MSECS * HZ) / 1000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) #define HPSA_BOARD_READY_ITERATIONS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) 	((HPSA_BOARD_READY_WAIT_SECS * 1000) / \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) 		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) #define HPSA_BOARD_NOT_READY_ITERATIONS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) 	((HPSA_BOARD_NOT_READY_WAIT_SECS * 1000) / \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) 		HPSA_BOARD_READY_POLL_INTERVAL_MSECS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) #define HPSA_POST_RESET_PAUSE_MSECS (3000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) #define HPSA_POST_RESET_NOOP_RETRIES (12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 
/*  Defining the different access methods */
/*
 * Memory mapped FIFO interface (SMART 53xx cards)
 */
/* Register offsets from the mapped base (h->vaddr). */
#define SA5_DOORBELL	0x20
#define SA5_REQUEST_PORT_OFFSET	0x40
#define SA5_REQUEST_PORT64_LO_OFFSET 0xC0
#define SA5_REQUEST_PORT64_HI_OFFSET 0xC4
#define SA5_REPLY_INTR_MASK_OFFSET	0x34
#define SA5_REPLY_PORT_OFFSET		0x44
#define SA5_INTR_STATUS		0x30
#define SA5_SCRATCHPAD_OFFSET	0xB0

#define SA5_CTCFG_OFFSET	0xB4
#define SA5_CTMEM_OFFSET	0xB8

/* Interrupt mask/pending bits; SA5B variants are for older boards. */
#define SA5_INTR_OFF		0x08
#define SA5B_INTR_OFF		0x04
#define SA5_INTR_PENDING	0x08
#define SA5B_INTR_PENDING	0x04
#define FIFO_EMPTY		0xffffffff
#define HPSA_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */

#define HPSA_ERROR_BIT		0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) 
/* Performant mode flags */
#define SA5_PERF_INTR_PENDING   0x04
#define SA5_PERF_INTR_OFF       0x05
#define SA5_OUTDB_STATUS_PERF_BIT       0x01
/* was defined twice with the same value; keep a single definition */
#define SA5_OUTDB_CLEAR_PERF_BIT        0x01
#define SA5_OUTDB_CLEAR         0xA0
#define SA5_OUTDB_STATUS        0x9C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 
/* Values passed to access_method.set_intr_mask() — presumably; verify callers. */
#define HPSA_INTR_ON 	1
#define HPSA_INTR_OFF	0

/*
 * Inbound Post Queue offsets for IO Accelerator Mode 2
 */
#define IOACCEL2_INBOUND_POSTQ_32	0x48
#define IOACCEL2_INBOUND_POSTQ_64_LOW	0xd0
#define IOACCEL2_INBOUND_POSTQ_64_HI	0xd4

/* Driver-assigned SCSI bus numbers for the different device classes. */
#define HPSA_PHYSICAL_DEVICE_BUS	0
#define HPSA_RAID_VOLUME_BUS		1
#define HPSA_EXTERNAL_RAID_VOLUME_BUS	2
#define HPSA_HBA_BUS			0
#define HPSA_LEGACY_HBA_BUS		3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	Send the command to the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) static void SA5_submit_command(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	(void) readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) static void SA5_submit_command_no_read(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) 	struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) static void SA5_submit_command_ioaccel2(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 	struct CommandList *c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 	writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440)  *  This card is the opposite of the other cards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441)  *   0 turns interrupts on...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)  *   0x08 turns them off...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) static void SA5_intr_mask(struct ctlr_info *h, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 	if (val) { /* Turn interrupts on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 		h->interrupts_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 	} else { /* Turn them off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 		h->interrupts_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 		writel(SA5_INTR_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459)  *  Variant of the above; 0x04 turns interrupts off...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) static void SA5B_intr_mask(struct ctlr_info *h, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 	if (val) { /* Turn interrupts on */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		h->interrupts_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 	} else { /* Turn them off */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		h->interrupts_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		writel(SA5B_INTR_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 		       h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) static void SA5_performant_intr_mask(struct ctlr_info *h, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 	if (val) { /* turn on interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 		h->interrupts_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 		h->interrupts_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 		writel(SA5_PERF_INTR_OFF,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 		(void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 	struct reply_queue_buffer *rq = &h->reply_queue[q];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 	unsigned long register_value = FIFO_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 	/* msi auto clears the interrupt pending bit. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 	if (unlikely(!(h->pdev->msi_enabled || h->msix_vectors))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		/* flush the controller write of the reply queue by reading
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 		 * outbound doorbell status register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		/* Do a read in order to flush the write to the controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		 * (as per spec.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		(void) readl(h->vaddr + SA5_OUTDB_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 	if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 		register_value = rq->head[rq->current_entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 		rq->current_entry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 		atomic_dec(&h->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 		register_value = FIFO_EMPTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 	/* Check for wraparound */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 	if (rq->current_entry == h->max_commands) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 		rq->current_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 		rq->wraparound ^= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 	return register_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523)  *   returns value read from hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524)  *     returns FIFO_EMPTY if there is nothing to read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) static unsigned long SA5_completed(struct ctlr_info *h,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 	__attribute__((unused)) u8 q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 	unsigned long register_value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 	if (register_value != FIFO_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		atomic_dec(&h->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) #ifdef HPSA_DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 	if (register_value != FIFO_EMPTY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 		dev_dbg(&h->pdev->dev, "Read %lx back from board\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 			register_value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		dev_dbg(&h->pdev->dev, "FIFO Empty read\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 	return register_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546)  *	Returns true if an interrupt is pending..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) static bool SA5_intr_pending(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 	unsigned long register_value  =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 		readl(h->vaddr + SA5_INTR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 	return register_value & SA5_INTR_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) static bool SA5_performant_intr_pending(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 	if (!register_value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 	/* Read outbound doorbell to flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 	return register_value & SA5_OUTDB_STATUS_PERF_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) #define SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT    0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) static bool SA5_ioaccel_mode1_intr_pending(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) 	unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	return (register_value & SA5_IOACCEL_MODE1_INTR_STATUS_CMP_BIT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 		true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578)  *      Returns true if an interrupt is pending..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) static bool SA5B_intr_pending(struct ctlr_info *h)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 	return readl(h->vaddr + SA5_INTR_STATUS) & SA5B_INTR_PENDING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) #define IOACCEL_MODE1_REPLY_QUEUE_INDEX  0x1A0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) #define IOACCEL_MODE1_PRODUCER_INDEX     0x1B8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) #define IOACCEL_MODE1_CONSUMER_INDEX     0x1BC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) #define IOACCEL_MODE1_REPLY_UNUSED       0xFFFFFFFFFFFFFFFFULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	u64 register_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	struct reply_queue_buffer *rq = &h->reply_queue[q];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 	BUG_ON(q >= h->nreply_queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) 	register_value = rq->head[rq->current_entry];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	if (register_value != IOACCEL_MODE1_REPLY_UNUSED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 		rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 		if (++rq->current_entry == rq->size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 			rq->current_entry = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 		 * @todo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 		 * Don't really need to write the new index after each command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 		 * but with current driver design this is easiest.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 		wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 		writel((q << 24) | rq->current_entry, h->vaddr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 				IOACCEL_MODE1_CONSUMER_INDEX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 		atomic_dec(&h->commands_outstanding);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 	return (unsigned long) register_value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) static struct access_method SA5_access = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 	.submit_command =	SA5_submit_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 	.set_intr_mask =	SA5_intr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 	.intr_pending =		SA5_intr_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 	.command_completed =	SA5_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) /* Duplicate entry of the above to mark unsupported boards */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) static struct access_method SA5A_access = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 	.submit_command =	SA5_submit_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 	.set_intr_mask =	SA5_intr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 	.intr_pending =		SA5_intr_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 	.command_completed =	SA5_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) static struct access_method SA5B_access = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 	.submit_command =	SA5_submit_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 	.set_intr_mask =	SA5B_intr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 	.intr_pending =		SA5B_intr_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 	.command_completed =	SA5_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) static struct access_method SA5_ioaccel_mode1_access = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 	.submit_command =	SA5_submit_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 	.set_intr_mask =	SA5_performant_intr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 	.intr_pending =		SA5_ioaccel_mode1_intr_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 	.command_completed =	SA5_ioaccel_mode1_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) static struct access_method SA5_ioaccel_mode2_access = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 	.submit_command =	SA5_submit_command_ioaccel2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	.set_intr_mask =	SA5_performant_intr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) 	.intr_pending =		SA5_performant_intr_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 	.command_completed =	SA5_performant_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) static struct access_method SA5_performant_access = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 	.submit_command =	SA5_submit_command,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	.set_intr_mask =	SA5_performant_intr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	.intr_pending =		SA5_performant_intr_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) 	.command_completed =	SA5_performant_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) static struct access_method SA5_performant_access_no_read = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) 	.submit_command =	SA5_submit_command_no_read,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) 	.set_intr_mask =	SA5_performant_intr_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	.intr_pending =		SA5_performant_intr_pending,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	.command_completed =	SA5_performant_completed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) struct board_type {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	u32	board_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	char	*product_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	struct access_method *access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) #endif /* HPSA_H */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673)