Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     3)  * ipr.c -- driver for IBM Power Linux RAID adapters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     5)  * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     6)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     7)  * Copyright (C) 2003, 2004 IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300     9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    10) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    11)  * Notes:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    13)  * This driver is used to control the following SCSI adapters:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    15)  * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    16)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    17)  * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    18)  *              PCI-X Dual Channel Ultra 320 SCSI Adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    19)  *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    20)  *              Embedded SCSI adapter on p615 and p655 systems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    21)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    22)  * Supported Hardware Features:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    23)  *	- Ultra 320 SCSI controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    24)  *	- PCI-X host interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    25)  *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    26)  *	- Non-Volatile Write Cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    27)  *	- Supports attachment of non-RAID disks, tape, and optical devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    28)  *	- RAID Levels 0, 5, 10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    29)  *	- Hot spare
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    30)  *	- Background Parity Checking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    31)  *	- Background Data Scrubbing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    32)  *	- Ability to increase the capacity of an existing RAID 5 disk array
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    33)  *		by adding disks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    34)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    35)  * Driver Features:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    36)  *	- Tagged command queuing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    37)  *	- Adapter microcode download
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    38)  *	- PCI hot plug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    39)  *	- SCSI device hot plug
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    40)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    41)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    43) #include <linux/fs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    44) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    45) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    46) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    47) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    48) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    49) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    50) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    51) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    52) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    53) #include <linux/wait.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    54) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    55) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    56) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    57) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    58) #include <linux/firmware.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    59) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    60) #include <linux/moduleparam.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    61) #include <linux/libata.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    62) #include <linux/hdreg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    63) #include <linux/reboot.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    64) #include <linux/stringify.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    65) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    66) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    67) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    68) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    69) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    70) #include <scsi/scsi_tcq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    71) #include <scsi/scsi_eh.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    72) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    73) #include "ipr.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    75) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    76)  *   Global Data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    77)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    78) static LIST_HEAD(ipr_ioa_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    79) static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    80) static unsigned int ipr_max_speed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    81) static int ipr_testmode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    82) static unsigned int ipr_fastfail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    83) static unsigned int ipr_transop_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    84) static unsigned int ipr_debug = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    85) static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    86) static unsigned int ipr_dual_ioa_raid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    87) static unsigned int ipr_number_of_msix = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    88) static unsigned int ipr_fast_reboot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    89) static DEFINE_SPINLOCK(ipr_driver_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    90) 
/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,		/* mailbox register offset */
		.max_cmds = 100,		/* max outstanding commands */
		.cache_line_size = 0x20,	/* PCI cache line size, bytes */
		.clear_isr = 1,			/* presumably: ISR must be explicitly cleared — confirm in ipr.h */
		.iopoll_weight = 0,		/* 0 = no irq_poll batching */
		{
			/* Interrupt/doorbell register offsets.  This is a
			 * positional initializer for an embedded struct whose
			 * member name is declared in ipr.h (not visible in
			 * this chunk).  The *_reg32 variants alias the same
			 * offsets on these 32-bit (SIS32) chips. */
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp — same layout as above, different offsets */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC — SIS64 generation: higher command depth, irq_poll
	   * enabled (weight 64), no explicit ISR clear, and distinct
	   * 64/32-bit register offsets plus extra dump/endian registers
	   * the older chips leave zero-initialized. */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   173) 
/*
 * Supported adapter chips.  Each entry maps a PCI vendor/device ID pair
 * to the register layout it uses (&ipr_chip_cfg[] above).  The middle
 * columns are positional initializers for fields declared in ipr.h,
 * not visible in this chunk: a boolean capability flag (presumably MSI
 * support — confirm against struct ipr_chip_t), the SIS interface level
 * (IPR_SIS32/IPR_SIS64), and the config access method (IPR_PCI_CFG vs
 * IPR_MMIO).  Note all SIS64 (CRoC-family) entries use MMIO.
 */
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   187) static int ipr_max_bus_speeds[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   188) 	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   189) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   191) MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   192) MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   193) module_param_named(max_speed, ipr_max_speed, uint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   194) MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   195) module_param_named(log_level, ipr_log_level, uint, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   196) MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   197) module_param_named(testmode, ipr_testmode, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   198) MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   199) module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   200) MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   201) module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   202) MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   203) module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   204) MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   205) module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   206) MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   207) module_param_named(max_devs, ipr_max_devs, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   208) MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   209) 		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   210) module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   211) MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16).  (default:16)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   212) module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   213) MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   214) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   215) MODULE_VERSION(IPR_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   217) /*  A constant array of IOASCs/URCs/Error Messages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   218) static const
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   219) struct ipr_error_table_t ipr_error_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   220) 	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   221) 	"8155: An unknown error was received"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   222) 	{0x00330000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   223) 	"Soft underlength error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   224) 	{0x005A0000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   225) 	"Command to be cancelled not found"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   226) 	{0x00808000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   227) 	"Qualified success"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   228) 	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   229) 	"FFFE: Soft device bus error recovered by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   230) 	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   231) 	"4101: Soft device bus fabric error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   232) 	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   233) 	"FFFC: Logical block guard error recovered by the device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   234) 	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   235) 	"FFFC: Logical block reference tag error recovered by the device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   236) 	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   237) 	"4171: Recovered scatter list tag / sequence number error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   238) 	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   239) 	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   240) 	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   241) 	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   242) 	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   243) 	"FFFD: Recovered logical block reference tag error detected by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   244) 	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   245) 	"FFFD: Logical block guard error recovered by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   246) 	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   247) 	"FFF9: Device sector reassign successful"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   248) 	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   249) 	"FFF7: Media error recovered by device rewrite procedures"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   250) 	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   251) 	"7001: IOA sector reassignment successful"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   252) 	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   253) 	"FFF9: Soft media error. Sector reassignment recommended"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   254) 	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   255) 	"FFF7: Media error recovered by IOA rewrite procedures"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   256) 	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   257) 	"FF3D: Soft PCI bus error recovered by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   258) 	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   259) 	"FFF6: Device hardware error recovered by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   260) 	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   261) 	"FFF6: Device hardware error recovered by the device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   262) 	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   263) 	"FF3D: Soft IOA error recovered by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   264) 	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   265) 	"FFFA: Undefined device response recovered by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   266) 	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   267) 	"FFF6: Device bus error, message or command phase"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   268) 	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   269) 	"FFFE: Task Management Function failed"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   270) 	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   271) 	"FFF6: Failure prediction threshold exceeded"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   272) 	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   273) 	"8009: Impending cache battery pack failure"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   274) 	{0x02040100, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   275) 	"Logical Unit in process of becoming ready"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   276) 	{0x02040200, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   277) 	"Initializing command required"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   278) 	{0x02040400, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   279) 	"34FF: Disk device format in progress"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   280) 	{0x02040C00, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   281) 	"Logical unit not accessible, target port in unavailable state"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   282) 	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   283) 	"9070: IOA requested reset"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   284) 	{0x023F0000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   285) 	"Synchronization required"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   286) 	{0x02408500, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   287) 	"IOA microcode download required"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   288) 	{0x02408600, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   289) 	"Device bus connection is prohibited by host"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   290) 	{0x024E0000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   291) 	"No ready, IOA shutdown"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   292) 	{0x025A0000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   293) 	"Not ready, IOA has been shutdown"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   294) 	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   295) 	"3020: Storage subsystem configuration error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   296) 	{0x03110B00, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   297) 	"FFF5: Medium error, data unreadable, recommend reassign"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   298) 	{0x03110C00, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   299) 	"7000: Medium error, data unreadable, do not reassign"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   300) 	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   301) 	"FFF3: Disk media format bad"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   302) 	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   303) 	"3002: Addressed device failed to respond to selection"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   304) 	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   305) 	"3100: Device bus error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   306) 	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   307) 	"3109: IOA timed out a device command"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   308) 	{0x04088000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   309) 	"3120: SCSI bus is not operational"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   310) 	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   311) 	"4100: Hard device bus fabric error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   312) 	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   313) 	"310C: Logical block guard error detected by the device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   314) 	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   315) 	"310C: Logical block reference tag error detected by the device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   316) 	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   317) 	"4170: Scatter list tag / sequence number error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   318) 	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   319) 	"8150: Logical block CRC error on IOA to Host transfer"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   320) 	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   321) 	"4170: Logical block sequence number error on IOA to Host transfer"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   322) 	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   323) 	"310D: Logical block reference tag error detected by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   324) 	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   325) 	"310D: Logical block guard error detected by the IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   326) 	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   327) 	"9000: IOA reserved area data check"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   328) 	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   329) 	"9001: IOA reserved area invalid data pattern"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   330) 	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   331) 	"9002: IOA reserved area LRC error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   332) 	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   333) 	"Hardware Error, IOA metadata access error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   334) 	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   335) 	"102E: Out of alternate sectors for disk storage"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   336) 	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   337) 	"FFF4: Data transfer underlength error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   338) 	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   339) 	"FFF4: Data transfer overlength error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   340) 	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   341) 	"3400: Logical unit failure"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   342) 	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   343) 	"FFF4: Device microcode is corrupt"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   344) 	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   345) 	"8150: PCI bus error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   346) 	{0x04430000, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   347) 	"Unsupported device bus message received"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   348) 	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   349) 	"FFF4: Disk device problem"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   350) 	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   351) 	"8150: Permanent IOA failure"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   352) 	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   353) 	"3010: Disk device returned wrong response to IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   354) 	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   355) 	"8151: IOA microcode error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   356) 	{0x04448500, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   357) 	"Device bus status error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   358) 	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   359) 	"8157: IOA error requiring IOA reset to recover"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   360) 	{0x04448700, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   361) 	"ATA device status error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   362) 	{0x04490000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   363) 	"Message reject received from the device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   364) 	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   365) 	"8008: A permanent cache battery pack failure occurred"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   366) 	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   367) 	"9090: Disk unit has been modified after the last known status"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   368) 	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   369) 	"9081: IOA detected device error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   370) 	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   371) 	"9082: IOA detected device error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   372) 	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   373) 	"3110: Device bus error, message or command phase"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   374) 	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   375) 	"3110: SAS Command / Task Management Function failed"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   376) 	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   377) 	"9091: Incorrect hardware configuration change has been detected"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   378) 	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   379) 	"9073: Invalid multi-adapter configuration"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   380) 	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   381) 	"4010: Incorrect connection between cascaded expanders"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   382) 	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   383) 	"4020: Connections exceed IOA design limits"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   384) 	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   385) 	"4030: Incorrect multipath connection"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   386) 	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   387) 	"4110: Unsupported enclosure function"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   388) 	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   389) 	"4120: SAS cable VPD cannot be read"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   390) 	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   391) 	"FFF4: Command to logical unit failed"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   392) 	{0x05240000, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   393) 	"Illegal request, invalid request type or request packet"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   394) 	{0x05250000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   395) 	"Illegal request, invalid resource handle"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   396) 	{0x05258000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   397) 	"Illegal request, commands not allowed to this device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   398) 	{0x05258100, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   399) 	"Illegal request, command not allowed to a secondary adapter"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   400) 	{0x05258200, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   401) 	"Illegal request, command not allowed to a non-optimized resource"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   402) 	{0x05260000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   403) 	"Illegal request, invalid field in parameter list"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   404) 	{0x05260100, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   405) 	"Illegal request, parameter not supported"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   406) 	{0x05260200, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   407) 	"Illegal request, parameter value invalid"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   408) 	{0x052C0000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   409) 	"Illegal request, command sequence error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   410) 	{0x052C8000, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   411) 	"Illegal request, dual adapter support not enabled"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   412) 	{0x052C8100, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   413) 	"Illegal request, another cable connector was physically disabled"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   414) 	{0x054E8000, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   415) 	"Illegal request, inconsistent group id/group count"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   416) 	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   417) 	"9031: Array protection temporarily suspended, protection resuming"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   418) 	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   419) 	"9040: Array protection temporarily suspended, protection resuming"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   420) 	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   421) 	"4080: IOA exceeded maximum operating temperature"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   422) 	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   423) 	"4085: Service required"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   424) 	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   425) 	"4086: SAS Adapter Hardware Configuration Error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   426) 	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   427) 	"3140: Device bus not ready to ready transition"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   428) 	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   429) 	"FFFB: SCSI bus was reset"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   430) 	{0x06290500, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   431) 	"FFFE: SCSI bus transition to single ended"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   432) 	{0x06290600, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   433) 	"FFFE: SCSI bus transition to LVD"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   434) 	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   435) 	"FFFB: SCSI bus was reset by another initiator"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   436) 	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   437) 	"3029: A device replacement has occurred"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   438) 	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   439) 	"4102: Device bus fabric performance degradation"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   440) 	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   441) 	"9051: IOA cache data exists for a missing or failed device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   442) 	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   443) 	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   444) 	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   445) 	"9025: Disk unit is not supported at its physical location"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   446) 	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   447) 	"3020: IOA detected a SCSI bus configuration error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   448) 	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   449) 	"3150: SCSI bus configuration error"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   450) 	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   451) 	"9074: Asymmetric advanced function disk configuration"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   452) 	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   453) 	"4040: Incomplete multipath connection between IOA and enclosure"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   454) 	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   455) 	"4041: Incomplete multipath connection between enclosure and device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   456) 	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   457) 	"9075: Incomplete multipath connection between IOA and remote IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   458) 	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   459) 	"9076: Configuration error, missing remote IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   460) 	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   461) 	"4050: Enclosure does not support a required multipath function"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   462) 	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   463) 	"4121: Configuration error, required cable is missing"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   464) 	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   465) 	"4122: Cable is not plugged into the correct location on remote IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   466) 	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   467) 	"4123: Configuration error, invalid cable vital product data"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   468) 	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   469) 	"4124: Configuration error, both cable ends are plugged into the same IOA"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   470) 	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   471) 	"4070: Logically bad block written on device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   472) 	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   473) 	"9041: Array protection temporarily suspended"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   474) 	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   475) 	"9042: Corrupt array parity detected on specified device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   476) 	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   477) 	"9030: Array no longer protected due to missing or failed disk unit"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   478) 	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   479) 	"9071: Link operational transition"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   480) 	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   481) 	"9072: Link not operational transition"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   482) 	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   483) 	"9032: Array exposed but still protected"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   484) 	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   485) 	"70DD: Device forced failed by disrupt device command"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   486) 	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   487) 	"4061: Multipath redundancy level got better"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   488) 	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   489) 	"4060: Multipath redundancy level got worse"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   490) 	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   491) 	"9083: Device raw mode enabled"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   492) 	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   493) 	"9084: Device raw mode disabled"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   494) 	{0x07270000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   495) 	"Failure due to other device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   496) 	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   497) 	"9008: IOA does not support functions expected by devices"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   498) 	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   499) 	"9010: Cache data associated with attached devices cannot be found"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   500) 	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   501) 	"9011: Cache data belongs to devices other than those attached"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   502) 	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   503) 	"9020: Array missing 2 or more devices with only 1 device present"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   504) 	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   505) 	"9021: Array missing 2 or more devices with 2 or more devices present"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   506) 	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   507) 	"9022: Exposed array is missing a required device"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   508) 	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   509) 	"9023: Array member(s) not at required physical locations"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   510) 	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   511) 	"9024: Array not functional due to present hardware configuration"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   512) 	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   513) 	"9026: Array not functional due to present hardware configuration"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   514) 	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   515) 	"9027: Array is missing a device and parity is out of sync"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   516) 	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   517) 	"9028: Maximum number of arrays already exist"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   518) 	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   519) 	"9050: Required cache data cannot be located for a disk unit"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   520) 	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   521) 	"9052: Cache data exists for a device that has been modified"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   522) 	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   523) 	"9054: IOA resources not available due to previous problems"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   524) 	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   525) 	"9092: Disk unit requires initialization before use"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   526) 	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   527) 	"9029: Incorrect hardware configuration change has been detected"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   528) 	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   529) 	"9060: One or more disk pairs are missing from an array"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   530) 	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   531) 	"9061: One or more disks are missing from an array"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   532) 	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   533) 	"9062: One or more disks are missing from an array"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   534) 	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   535) 	"9063: Maximum number of functional arrays has been exceeded"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   536) 	{0x07279A00, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   537) 	"Data protect, other volume set problem"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   538) 	{0x0B260000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   539) 	"Aborted command, invalid descriptor"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   540) 	{0x0B3F9000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   541) 	"Target operating conditions have changed, dual adapter takeover"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   542) 	{0x0B530200, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   543) 	"Aborted command, medium removal prevented"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   544) 	{0x0B5A0000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   545) 	"Command terminated by host"},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   546) 	{0x0B5B8000, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   547) 	"Aborted command, command terminated by host"}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   548) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   550) static const struct ipr_ses_table_entry ipr_ses_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   551) 	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   552) 	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   553) 	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   554) 	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   555) 	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   556) 	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   557) 	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   558) 	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   559) 	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   560) 	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   561) 	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   562) 	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   563) 	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   564) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   566) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   567)  *  Function Prototypes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   568)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   569) static int ipr_reset_alert(struct ipr_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   570) static void ipr_process_ccn(struct ipr_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   571) static void ipr_process_error(struct ipr_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   572) static void ipr_reset_ioa_job(struct ipr_cmnd *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   573) static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   574) 				   enum ipr_shutdown_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   576) #ifdef CONFIG_SCSI_IPR_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   577) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   578)  * ipr_trc_hook - Add a trace entry to the driver trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   579)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   580)  * @type:		trace type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   581)  * @add_data:	additional data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   582)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   583)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   584)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   585)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   586) static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   587) 			 u8 type, u32 add_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   588) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   589) 	struct ipr_trace_entry *trace_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   590) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   591) 	unsigned int trace_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   593) 	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   594) 	trace_entry = &ioa_cfg->trace[trace_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   595) 	trace_entry->time = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   596) 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   597) 	trace_entry->type = type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   598) 	if (ipr_cmd->ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   599) 		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   600) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   601) 		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   602) 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   603) 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   604) 	trace_entry->u.add_data = add_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   605) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   607) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   608) #define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   609) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   611) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   612)  * ipr_lock_and_done - Acquire lock and complete command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   613)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   614)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   615)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   616)  *	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   617)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   618) static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   619) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   620) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   621) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   623) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   624) 	ipr_cmd->done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   625) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   628) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   629)  * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   630)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   631)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   632)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   633)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   634)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   635) static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   637) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   638) 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   639) 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   640) 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   641) 	int hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   643) 	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   644) 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   645) 	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   646) 	ioarcb->data_transfer_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   647) 	ioarcb->read_data_transfer_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   648) 	ioarcb->ioadl_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   649) 	ioarcb->read_ioadl_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   651) 	if (ipr_cmd->ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   652) 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   653) 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   654) 		ioasa64->u.gata.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   655) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   656) 		ioarcb->write_ioadl_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   657) 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   658) 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   659) 		ioasa->u.gata.status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   660) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   662) 	ioasa->hdr.ioasc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   663) 	ioasa->hdr.residual_data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   664) 	ipr_cmd->scsi_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   665) 	ipr_cmd->qc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   666) 	ipr_cmd->sense_buffer[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   667) 	ipr_cmd->dma_use_sg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   668) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   670) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   671)  * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   672)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   673)  * @fast_done:	fast done function call-back
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   674)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   675)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   676)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   677)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   678) static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   679) 			      void (*fast_done) (struct ipr_cmnd *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   681) 	ipr_reinit_ipr_cmnd(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   682) 	ipr_cmd->u.scratch = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   683) 	ipr_cmd->sibling = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   684) 	ipr_cmd->eh_comp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   685) 	ipr_cmd->fast_done = fast_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   686) 	timer_setup(&ipr_cmd->timer, NULL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   689) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   690)  * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   691)  * @hrrq:	hrr queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   692)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   693)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   694)  * 	pointer to ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   695)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   696) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   697) struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   699) 	struct ipr_cmnd *ipr_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   701) 	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   702) 		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   703) 			struct ipr_cmnd, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   704) 		list_del(&ipr_cmd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   708) 	return ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   711) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   712)  * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   713)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   714)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   715)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   716)  *	pointer to ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   717)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   718) static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   719) struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   721) 	struct ipr_cmnd *ipr_cmd =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   722) 		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   723) 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   724) 	return ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   727) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   728)  * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   729)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   730)  * @clr_ints:     interrupts to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   731)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   732)  * This function masks all interrupts on the adapter, then clears the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   733)  * interrupts specified in the mask
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   734)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   735)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   736)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   737)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   738) static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   739) 					  u32 clr_ints)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   741) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   743) 	/* Stop new interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   744) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   745) 		spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   746) 		ioa_cfg->hrrq[i].allow_interrupts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   747) 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   748) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   750) 	/* Set interrupt mask to stop all new interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   751) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   752) 		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   753) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   754) 		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   756) 	/* Clear any pending interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   757) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   758) 		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   759) 	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   760) 	readl(ioa_cfg->regs.sense_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   761) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   763) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   764)  * ipr_save_pcix_cmd_reg - Save PCI-X command register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   765)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   766)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   767)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   768)  * 	0 on success / -EIO on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   769)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   770) static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   771) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   772) 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   774) 	if (pcix_cmd_reg == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   775) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   777) 	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   778) 				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   779) 		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   780) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   783) 	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   784) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   785) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   787) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   788)  * ipr_set_pcix_cmd_reg - Setup PCI-X command register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   789)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   790)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   791)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   792)  * 	0 on success / -EIO on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   793)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   794) static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   796) 	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   798) 	if (pcix_cmd_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   799) 		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   800) 					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   801) 			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   802) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   803) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   806) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   808) 
/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Caller must hold the command's hrrq->_lock (the locked wrapper is
 * ipr_sata_eh_done()).
 *
 * Return value:
 * 	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	/* Report the op as failed/busy back to libata before completing it */
	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	/* Wake any error-handler thread waiting on this command */
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	/* Return the command block to the free list only after completion */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   832) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   833)  * ipr_sata_eh_done - done function for aborted SATA commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   834)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   835)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   836)  * This function is invoked for ops generated to SATA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   837)  * devices which are being aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   838)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   839)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   840)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   841)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   842) static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   843) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   844) 	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   845) 	unsigned long hrrq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   847) 	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   848) 	__ipr_sata_eh_done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   849) 	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   851) 
/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Caller must hold the command's hrrq->_lock (the locked wrapper is
 * ipr_scsi_eh_done()).
 *
 * Return value:
 * 	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	/* Flag the command as failed before handing it back to the midlayer */
	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	/* Wake any error-handler thread waiting on this command */
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	/* Return the command block to the free list only after completion */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   875) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   876)  * ipr_scsi_eh_done - mid-layer done function for aborted ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   877)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   878)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   879)  * This function is invoked by the interrupt handler for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   880)  * ops generated by the SCSI mid-layer which are being aborted.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   881)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   882)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   883)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   884)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   885) static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   886) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   887) 	unsigned long hrrq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   888) 	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   890) 	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   891) 	__ipr_scsi_eh_done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   892) 	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   894) 
/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	/* Walk every HRR queue and fail each command still pending */
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			/* Fake an IOASA indicating the adapter was reset */
			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			/* Route completion through the unlocked eh_done
			 * variants since hrrq->_lock is already held here */
			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			/* Cancel the command timeout before completing */
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   936) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   937)  * ipr_send_command -  Send driver initiated requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   938)  * @ipr_cmd:		ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   939)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   940)  * This function sends a command to the adapter using the correct write call.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   941)  * In the case of sis64, calculate the ioarcb size required. Then or in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   942)  * appropriate bits.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   943)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   944)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   945)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   946)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   947) static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   948) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   949) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   950) 	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   952) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   953) 		/* The default size is 256 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   954) 		send_dma_addr |= 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   956) 		/* If the number of ioadls * size of ioadl > 128 bytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   957) 		   then use a 512 byte ioarcb */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   958) 		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   959) 			send_dma_addr |= 0x4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   960) 		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   961) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   962) 		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   963) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   964) 
/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	/* Queue on the pending list before sending so a completion
	 * interrupt can always find the command */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	/* Arm the command timeout; @timeout is in jiffies */
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	/* Hand the IOARCB to the adapter last */
	ipr_send_command(ipr_cmd);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   996) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   997)  * ipr_internal_cmd_done - Op done function for an internally generated op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   998)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   999)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1000)  * This function is the op done function for an internally generated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1001)  * blocking op. It simply wakes the sleeping thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1002)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1003)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1004)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1005)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1006) static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1007) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1008) 	if (ipr_cmd->sibling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1009) 		ipr_cmd->sibling = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1010) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1011) 		complete(&ipr_cmd->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1014) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1015)  * ipr_init_ioadl - initialize the ioadl for the correct SIS type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1016)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1017)  * @dma_addr:	dma address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1018)  * @len:	transfer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1019)  * @flags:	ioadl flag value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1020)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1021)  * This function initializes an ioadl in the case where there is only a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1022)  * descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1023)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1024)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1025)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1026)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1027) static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1028) 			   u32 len, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1029) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1030) 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1031) 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1033) 	ipr_cmd->dma_use_sg = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1035) 	if (ipr_cmd->ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1036) 		ioadl64->flags = cpu_to_be32(flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1037) 		ioadl64->data_len = cpu_to_be32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1038) 		ioadl64->address = cpu_to_be64(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1040) 		ipr_cmd->ioarcb.ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1041) 		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1042) 		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1043) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1044) 		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1045) 		ioadl->address = cpu_to_be32(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1047) 		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1048) 			ipr_cmd->ioarcb.read_ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1049) 				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1050) 			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1051) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1052) 			ipr_cmd->ioarcb.ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1053) 			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1054) 			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1055) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1057) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1058) 
/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Caller must hold ioa_cfg->host->host_lock; it is dropped while this
 * function sleeps and reacquired before returning.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct timer_list *),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* ipr_internal_cmd_done() completes this on command completion */
	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	/* Drop the host lock while sleeping so the completion path can run */
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1082) static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1084) 	unsigned int hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1086) 	if (ioa_cfg->hrrq_num == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1087) 		hrrq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1088) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1089) 		hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1090) 		hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1091) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1092) 	return hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1094) 
/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		/* Track both the command and the hostrcb as outstanding */
		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		/* Build the HCAM CDB: opcode, subtype, and the hcam buffer
		 * length in big-endian split across bytes 7 and 8 */
		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		/* Single read descriptor covering the hcam DMA buffer */
		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		/* Completion handler depends on the HCAM flavor requested */
		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		ipr_send_command(ipr_cmd);
	} else {
		/* HCAMs not allowed right now: park the hostrcb for later */
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1145) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1146)  * ipr_update_ata_class - Update the ata class in the resource entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1147)  * @res:	resource entry struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1148)  * @proto:	cfgte device bus protocol value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1149)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1150)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1151)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1152)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1153) static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1154) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1155) 	switch (proto) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1156) 	case IPR_PROTO_SATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1157) 	case IPR_PROTO_SAS_STP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1158) 		res->ata_class = ATA_DEV_ATA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1159) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1160) 	case IPR_PROTO_SATA_ATAPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1161) 	case IPR_PROTO_SAS_STP_ATAPI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1162) 		res->ata_class = ATA_DEV_ATAPI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1163) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1164) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1165) 		res->ata_class = ATA_DEV_UNKNOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1166) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1168) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1170) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1171)  * ipr_init_res_entry - Initialize a resource entry struct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1172)  * @res:	resource entry struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1173)  * @cfgtew:	config table entry wrapper struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1174)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1175)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1176)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1177)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1178) static void ipr_init_res_entry(struct ipr_resource_entry *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1179) 			       struct ipr_config_table_entry_wrapper *cfgtew)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1181) 	int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1182) 	unsigned int proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1183) 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1184) 	struct ipr_resource_entry *gscsi_res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1186) 	res->needs_sync_complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1187) 	res->in_erp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1188) 	res->add_to_ml = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1189) 	res->del_from_ml = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1190) 	res->resetting_device = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1191) 	res->reset_occurred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1192) 	res->sdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1193) 	res->sata_port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1195) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1196) 		proto = cfgtew->u.cfgte64->proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1197) 		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1198) 		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1199) 		res->qmodel = IPR_QUEUEING_MODEL64(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1200) 		res->type = cfgtew->u.cfgte64->res_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1202) 		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1203) 			sizeof(res->res_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1205) 		res->bus = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1206) 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1207) 			sizeof(res->dev_lun.scsi_lun));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1208) 		res->lun = scsilun_to_int(&res->dev_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1210) 		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1211) 			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1212) 				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1213) 					found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1214) 					res->target = gscsi_res->target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1215) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1216) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1217) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1218) 			if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1219) 				res->target = find_first_zero_bit(ioa_cfg->target_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1220) 								  ioa_cfg->max_devs_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1221) 				set_bit(res->target, ioa_cfg->target_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1222) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1223) 		} else if (res->type == IPR_RES_TYPE_IOAFP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1224) 			res->bus = IPR_IOAFP_VIRTUAL_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1225) 			res->target = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1226) 		} else if (res->type == IPR_RES_TYPE_ARRAY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1227) 			res->bus = IPR_ARRAY_VIRTUAL_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1228) 			res->target = find_first_zero_bit(ioa_cfg->array_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1229) 							  ioa_cfg->max_devs_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1230) 			set_bit(res->target, ioa_cfg->array_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1231) 		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1232) 			res->bus = IPR_VSET_VIRTUAL_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1233) 			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1234) 							  ioa_cfg->max_devs_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1235) 			set_bit(res->target, ioa_cfg->vset_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1236) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1237) 			res->target = find_first_zero_bit(ioa_cfg->target_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1238) 							  ioa_cfg->max_devs_supported);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1239) 			set_bit(res->target, ioa_cfg->target_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1240) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1241) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1242) 		proto = cfgtew->u.cfgte->proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1243) 		res->qmodel = IPR_QUEUEING_MODEL(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1244) 		res->flags = cfgtew->u.cfgte->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1245) 		if (res->flags & IPR_IS_IOA_RESOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1246) 			res->type = IPR_RES_TYPE_IOAFP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1247) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1248) 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1250) 		res->bus = cfgtew->u.cfgte->res_addr.bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1251) 		res->target = cfgtew->u.cfgte->res_addr.target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1252) 		res->lun = cfgtew->u.cfgte->res_addr.lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1253) 		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1256) 	ipr_update_ata_class(res, proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1259) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1260)  * ipr_is_same_device - Determine if two devices are the same.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1261)  * @res:	resource entry struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1262)  * @cfgtew:	config table entry wrapper struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1263)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1264)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1265)  * 	1 if the devices are the same / 0 otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1266)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1267) static int ipr_is_same_device(struct ipr_resource_entry *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1268) 			      struct ipr_config_table_entry_wrapper *cfgtew)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1269) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1270) 	if (res->ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1271) 		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1272) 					sizeof(cfgtew->u.cfgte64->dev_id)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1273) 			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1274) 					sizeof(cfgtew->u.cfgte64->lun))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1275) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1276) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1277) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1278) 		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1279) 		    res->target == cfgtew->u.cfgte->res_addr.target &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1280) 		    res->lun == cfgtew->u.cfgte->res_addr.lun)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1281) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1284) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1287) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1288)  * __ipr_format_res_path - Format the resource path for printing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1289)  * @res_path:	resource path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1290)  * @buffer:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1291)  * @len:	length of buffer provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1292)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1293)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1294)  * 	pointer to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1295)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1296) static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1298) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1299) 	char *p = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1301) 	*p = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1302) 	p += scnprintf(p, buffer + len - p, "%02X", res_path[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1303) 	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1304) 		p += scnprintf(p, buffer + len - p, "-%02X", res_path[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1306) 	return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1307) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1309) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1310)  * ipr_format_res_path - Format the resource path for printing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1311)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1312)  * @res_path:	resource path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1313)  * @buffer:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1314)  * @len:	length of buffer provided
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1315)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1316)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1317)  *	pointer to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1318)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1319) static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1320) 				 u8 *res_path, char *buffer, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1322) 	char *p = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1324) 	*p = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1325) 	p += scnprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1326) 	__ipr_format_res_path(res_path, p, len - (buffer - p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1327) 	return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1330) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1331)  * ipr_update_res_entry - Update the resource entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1332)  * @res:	resource entry struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1333)  * @cfgtew:	config table entry wrapper struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1334)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1335)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1336)  *      none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1337)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1338) static void ipr_update_res_entry(struct ipr_resource_entry *res,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1339) 				 struct ipr_config_table_entry_wrapper *cfgtew)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1341) 	char buffer[IPR_MAX_RES_PATH_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1342) 	unsigned int proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1343) 	int new_path = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1345) 	if (res->ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1346) 		res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1347) 		res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1348) 		res->type = cfgtew->u.cfgte64->res_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1350) 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1351) 			sizeof(struct ipr_std_inq_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1353) 		res->qmodel = IPR_QUEUEING_MODEL64(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1354) 		proto = cfgtew->u.cfgte64->proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1355) 		res->res_handle = cfgtew->u.cfgte64->res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1356) 		res->dev_id = cfgtew->u.cfgte64->dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1358) 		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1359) 			sizeof(res->dev_lun.scsi_lun));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1361) 		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1362) 					sizeof(res->res_path))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1363) 			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1364) 				sizeof(res->res_path));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1365) 			new_path = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1366) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1368) 		if (res->sdev && new_path)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1369) 			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1370) 				    ipr_format_res_path(res->ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1371) 					res->res_path, buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1372) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1373) 		res->flags = cfgtew->u.cfgte->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1374) 		if (res->flags & IPR_IS_IOA_RESOURCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1375) 			res->type = IPR_RES_TYPE_IOAFP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1376) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1377) 			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1379) 		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1380) 			sizeof(struct ipr_std_inq_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1382) 		res->qmodel = IPR_QUEUEING_MODEL(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1383) 		proto = cfgtew->u.cfgte->proto;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1384) 		res->res_handle = cfgtew->u.cfgte->res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1385) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1387) 	ipr_update_ata_class(res, proto);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1390) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1391)  * ipr_clear_res_target - Clear the bit in the bit map representing the target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1392)  * 			  for the resource.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1393)  * @res:	resource entry struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1394)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1395)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1396)  *      none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1397)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1398) static void ipr_clear_res_target(struct ipr_resource_entry *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1400) 	struct ipr_resource_entry *gscsi_res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1401) 	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1403) 	if (!ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1404) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1406) 	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1407) 		clear_bit(res->target, ioa_cfg->array_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1408) 	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1409) 		clear_bit(res->target, ioa_cfg->vset_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1410) 	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1411) 		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1412) 			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1413) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1414) 		clear_bit(res->target, ioa_cfg->target_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1416) 	} else if (res->bus == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1417) 		clear_bit(res->target, ioa_cfg->target_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1420) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1421)  * ipr_handle_config_change - Handle a config change from the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1422)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1423)  * @hostrcb:	hostrcb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1424)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1425)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1426)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1427)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1428) static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1429) 				     struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1431) 	struct ipr_resource_entry *res = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1432) 	struct ipr_config_table_entry_wrapper cfgtew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1433) 	__be32 cc_res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1435) 	u32 is_ndn = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1437) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1438) 		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1439) 		cc_res_handle = cfgtew.u.cfgte64->res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1440) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1441) 		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1442) 		cc_res_handle = cfgtew.u.cfgte->res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1443) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1445) 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1446) 		if (res->res_handle == cc_res_handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1447) 			is_ndn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1448) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1449) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1452) 	if (is_ndn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1453) 		if (list_empty(&ioa_cfg->free_res_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1454) 			ipr_send_hcam(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1455) 				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1456) 				      hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1457) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1458) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1459) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1460) 		res = list_entry(ioa_cfg->free_res_q.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1461) 				 struct ipr_resource_entry, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1463) 		list_del(&res->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1464) 		ipr_init_res_entry(res, &cfgtew);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1465) 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1466) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1468) 	ipr_update_res_entry(res, &cfgtew);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1470) 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1471) 		if (res->sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1472) 			res->del_from_ml = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1473) 			res->res_handle = IPR_INVALID_RES_HANDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1474) 			schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1475) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1476) 			ipr_clear_res_target(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1477) 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1478) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1479) 	} else if (!res->sdev || res->del_from_ml) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1480) 		res->add_to_ml = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1481) 		schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1484) 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1485) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1487) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1488)  * ipr_process_ccn - Op done function for a CCN.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1489)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1490)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1491)  * This function is the op done function for a configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1492)  * change notification host controlled async from the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1493)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1494)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1495)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1496)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1497) static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1498) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1499) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1500) 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1501) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1503) 	list_del_init(&hostrcb->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1504) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1506) 	if (ioasc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1507) 		if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1508) 		    ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1509) 			dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1510) 				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1512) 		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1513) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1514) 		ipr_handle_config_change(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1515) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1518) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1519)  * strip_and_pad_whitespace - Strip and pad trailing whitespace.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1520)  * @i:		index into buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1521)  * @buf:		string to modify
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1522)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1523)  * This function will strip all trailing whitespace, pad the end
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1524)  * of the string with a single space, and NULL terminate the string.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1525)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1526)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1527)  * 	new length of string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1528)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1529) static int strip_and_pad_whitespace(int i, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1531) 	while (i && buf[i] == ' ')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1532) 		i--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1533) 	buf[i+1] = ' ';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1534) 	buf[i+2] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1535) 	return i + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1536) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1538) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1539)  * ipr_log_vpd_compact - Log the passed extended VPD compactly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1540)  * @prefix:		string to print at start of printk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1541)  * @hostrcb:	hostrcb pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1542)  * @vpd:		vendor/product id/sn struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1543)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1544)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1545)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1546)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1547) static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1548) 				struct ipr_vpd *vpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1550) 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1551) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1553) 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1554) 	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1556) 	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1557) 	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1559) 	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1560) 	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1562) 	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1565) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1566)  * ipr_log_vpd - Log the passed VPD to the error log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1567)  * @vpd:		vendor/product id/sn struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1568)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1569)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1570)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1571)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1572) static void ipr_log_vpd(struct ipr_vpd *vpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1573) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1574) 	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1575) 		    + IPR_SERIAL_NUM_LEN];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1577) 	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1578) 	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1579) 	       IPR_PROD_ID_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1580) 	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1581) 	ipr_err("Vendor/Product ID: %s\n", buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1583) 	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1584) 	buffer[IPR_SERIAL_NUM_LEN] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1585) 	ipr_err("    Serial Number: %s\n", buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1586) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1588) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1589)  * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1590)  * @prefix:		string to print at start of printk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1591)  * @hostrcb:	hostrcb pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1592)  * @vpd:		vendor/product id/sn/wwn struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1593)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1594)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1595)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1596)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1597) static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1598) 				    struct ipr_ext_vpd *vpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1600) 	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1601) 	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1602) 		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1603) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1605) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1606)  * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1607)  * @vpd:		vendor/product id/sn/wwn struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1608)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1609)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1610)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1611)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1612) static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1613) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1614) 	ipr_log_vpd(&vpd->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1615) 	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1616) 		be32_to_cpu(vpd->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1617) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1619) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1620)  * ipr_log_enhanced_cache_error - Log a cache error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1621)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1622)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1623)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1624)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1625)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1626)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1627) static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1628) 					 struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1630) 	struct ipr_hostrcb_type_12_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1632) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1633) 		error = &hostrcb->hcam.u.error64.u.type_12_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1634) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1635) 		error = &hostrcb->hcam.u.error.u.type_12_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1637) 	ipr_err("-----Current Configuration-----\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1638) 	ipr_err("Cache Directory Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1639) 	ipr_log_ext_vpd(&error->ioa_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1640) 	ipr_err("Adapter Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1641) 	ipr_log_ext_vpd(&error->cfc_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1643) 	ipr_err("-----Expected Configuration-----\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1644) 	ipr_err("Cache Directory Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1645) 	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1646) 	ipr_err("Adapter Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1647) 	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1649) 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1650) 		     be32_to_cpu(error->ioa_data[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1651) 		     be32_to_cpu(error->ioa_data[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1652) 		     be32_to_cpu(error->ioa_data[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1655) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1656)  * ipr_log_cache_error - Log a cache error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1657)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1658)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1659)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1660)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1661)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1662)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1663) static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1664) 				struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1665) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1666) 	struct ipr_hostrcb_type_02_error *error =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1667) 		&hostrcb->hcam.u.error.u.type_02_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1669) 	ipr_err("-----Current Configuration-----\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1670) 	ipr_err("Cache Directory Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1671) 	ipr_log_vpd(&error->ioa_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1672) 	ipr_err("Adapter Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1673) 	ipr_log_vpd(&error->cfc_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1675) 	ipr_err("-----Expected Configuration-----\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1676) 	ipr_err("Cache Directory Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1677) 	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1678) 	ipr_err("Adapter Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1679) 	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1681) 	ipr_err("Additional IOA Data: %08X %08X %08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1682) 		     be32_to_cpu(error->ioa_data[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1683) 		     be32_to_cpu(error->ioa_data[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1684) 		     be32_to_cpu(error->ioa_data[2]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1685) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1687) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1688)  * ipr_log_enhanced_config_error - Log a configuration error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1689)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1690)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1691)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1692)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1693)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1694)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1695) static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1696) 					  struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1697) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1698) 	int errors_logged, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1699) 	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1700) 	struct ipr_hostrcb_type_13_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1702) 	error = &hostrcb->hcam.u.error.u.type_13_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1703) 	errors_logged = be32_to_cpu(error->errors_logged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1705) 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1706) 		be32_to_cpu(error->errors_detected), errors_logged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1708) 	dev_entry = error->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1710) 	for (i = 0; i < errors_logged; i++, dev_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1711) 		ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1713) 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1714) 		ipr_log_ext_vpd(&dev_entry->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1716) 		ipr_err("-----New Device Information-----\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1717) 		ipr_log_ext_vpd(&dev_entry->new_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1719) 		ipr_err("Cache Directory Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1720) 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1722) 		ipr_err("Adapter Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1723) 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1724) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1727) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1728)  * ipr_log_sis64_config_error - Log a device error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1729)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1730)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1731)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1732)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1733)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1734)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1735) static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1736) 				       struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1738) 	int errors_logged, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1739) 	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1740) 	struct ipr_hostrcb_type_23_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1741) 	char buffer[IPR_MAX_RES_PATH_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1743) 	error = &hostrcb->hcam.u.error64.u.type_23_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1744) 	errors_logged = be32_to_cpu(error->errors_logged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1746) 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1747) 		be32_to_cpu(error->errors_detected), errors_logged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1749) 	dev_entry = error->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1751) 	for (i = 0; i < errors_logged; i++, dev_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1752) 		ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1754) 		ipr_err("Device %d : %s", i + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1755) 			__ipr_format_res_path(dev_entry->res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1756) 					      buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1757) 		ipr_log_ext_vpd(&dev_entry->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1759) 		ipr_err("-----New Device Information-----\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1760) 		ipr_log_ext_vpd(&dev_entry->new_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1762) 		ipr_err("Cache Directory Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1763) 		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1765) 		ipr_err("Adapter Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1766) 		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1767) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1770) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1771)  * ipr_log_config_error - Log a configuration error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1772)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1773)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1774)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1775)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1776)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1777)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1778) static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1779) 				 struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1780) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1781) 	int errors_logged, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1782) 	struct ipr_hostrcb_device_data_entry *dev_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1783) 	struct ipr_hostrcb_type_03_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1785) 	error = &hostrcb->hcam.u.error.u.type_03_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1786) 	errors_logged = be32_to_cpu(error->errors_logged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1788) 	ipr_err("Device Errors Detected/Logged: %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1789) 		be32_to_cpu(error->errors_detected), errors_logged);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1791) 	dev_entry = error->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1793) 	for (i = 0; i < errors_logged; i++, dev_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1794) 		ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1796) 		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1797) 		ipr_log_vpd(&dev_entry->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1799) 		ipr_err("-----New Device Information-----\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1800) 		ipr_log_vpd(&dev_entry->new_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1802) 		ipr_err("Cache Directory Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1803) 		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1805) 		ipr_err("Adapter Card Information:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1806) 		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1808) 		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1809) 			be32_to_cpu(dev_entry->ioa_data[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1810) 			be32_to_cpu(dev_entry->ioa_data[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1811) 			be32_to_cpu(dev_entry->ioa_data[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1812) 			be32_to_cpu(dev_entry->ioa_data[3]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1813) 			be32_to_cpu(dev_entry->ioa_data[4]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1817) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1818)  * ipr_log_enhanced_array_error - Log an array configuration error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1819)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1820)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1821)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1822)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1823)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1824)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1825) static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1826) 					 struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1828) 	int i, num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1829) 	struct ipr_hostrcb_type_14_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1830) 	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1831) 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1833) 	error = &hostrcb->hcam.u.error.u.type_14_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1835) 	ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1837) 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1838) 		error->protection_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1839) 		ioa_cfg->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1840) 		error->last_func_vset_res_addr.bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1841) 		error->last_func_vset_res_addr.target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1842) 		error->last_func_vset_res_addr.lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1844) 	ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1846) 	array_entry = error->array_member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1847) 	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1848) 			    ARRAY_SIZE(error->array_member));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1850) 	for (i = 0; i < num_entries; i++, array_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1851) 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1852) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1854) 		if (be32_to_cpu(error->exposed_mode_adn) == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1855) 			ipr_err("Exposed Array Member %d:\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1856) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1857) 			ipr_err("Array Member %d:\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1859) 		ipr_log_ext_vpd(&array_entry->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1860) 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1861) 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1862) 				 "Expected Location");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1864) 		ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1868) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1869)  * ipr_log_array_error - Log an array configuration error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1870)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1871)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1872)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1873)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1874)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1875)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1876) static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1877) 				struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1879) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1880) 	struct ipr_hostrcb_type_04_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1881) 	struct ipr_hostrcb_array_data_entry *array_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1882) 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1884) 	error = &hostrcb->hcam.u.error.u.type_04_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1886) 	ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1888) 	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1889) 		error->protection_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1890) 		ioa_cfg->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1891) 		error->last_func_vset_res_addr.bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1892) 		error->last_func_vset_res_addr.target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1893) 		error->last_func_vset_res_addr.lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1895) 	ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1897) 	array_entry = error->array_member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1899) 	for (i = 0; i < 18; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1900) 		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1901) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1903) 		if (be32_to_cpu(error->exposed_mode_adn) == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1904) 			ipr_err("Exposed Array Member %d:\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1905) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1906) 			ipr_err("Array Member %d:\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1908) 		ipr_log_vpd(&array_entry->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1910) 		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1911) 		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1912) 				 "Expected Location");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1914) 		ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1916) 		if (i == 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1917) 			array_entry = error->array_member2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1918) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1919) 			array_entry++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1921) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1923) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1924)  * ipr_log_hex_data - Log additional hex IOA error data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1925)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1926)  * @data:		IOA error data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1927)  * @len:		data length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1928)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1929)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1930)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1931)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1932) static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1934) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1936) 	if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1937) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1939) 	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1940) 		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1942) 	for (i = 0; i < len / 4; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1943) 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1944) 			be32_to_cpu(data[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1945) 			be32_to_cpu(data[i+1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1946) 			be32_to_cpu(data[i+2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1947) 			be32_to_cpu(data[i+3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1951) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1952)  * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1953)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1954)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1955)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1956)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1957)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1958)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1959) static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1960) 					    struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1962) 	struct ipr_hostrcb_type_17_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1964) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1965) 		error = &hostrcb->hcam.u.error64.u.type_17_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1966) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1967) 		error = &hostrcb->hcam.u.error.u.type_17_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1969) 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1970) 	strim(error->failure_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1972) 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1973) 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1974) 	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1975) 	ipr_log_hex_data(ioa_cfg, error->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1976) 			 be32_to_cpu(hostrcb->hcam.length) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1977) 			 (offsetof(struct ipr_hostrcb_error, u) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1978) 			  offsetof(struct ipr_hostrcb_type_17_error, data)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1979) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1981) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1982)  * ipr_log_dual_ioa_error - Log a dual adapter error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1983)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1984)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1985)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1986)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1987)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1988)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1989) static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1990) 				   struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1991) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1992) 	struct ipr_hostrcb_type_07_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1994) 	error = &hostrcb->hcam.u.error.u.type_07_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1995) 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1996) 	strim(error->failure_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1998) 	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  1999) 		     be32_to_cpu(hostrcb->hcam.u.error.prc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2000) 	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2001) 	ipr_log_hex_data(ioa_cfg, error->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2002) 			 be32_to_cpu(hostrcb->hcam.length) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2003) 			 (offsetof(struct ipr_hostrcb_error, u) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2004) 			  offsetof(struct ipr_hostrcb_type_07_error, data)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2007) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2008) 	u8 active;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2009) 	char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2010) } path_active_desc[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2011) 	{ IPR_PATH_NO_INFO, "Path" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2012) 	{ IPR_PATH_ACTIVE, "Active path" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2013) 	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2014) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2016) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2017) 	u8 state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2018) 	char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2019) } path_state_desc[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2020) 	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2021) 	{ IPR_PATH_HEALTHY, "is healthy" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2022) 	{ IPR_PATH_DEGRADED, "is degraded" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2023) 	{ IPR_PATH_FAILED, "is failed" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2024) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2026) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2027)  * ipr_log_fabric_path - Log a fabric path error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2028)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2029)  * @fabric:		fabric descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2030)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2031)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2032)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2033)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2034) static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2035) 				struct ipr_hostrcb_fabric_desc *fabric)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2036) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2037) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2038) 	u8 path_state = fabric->path_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2039) 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2040) 	u8 state = path_state & IPR_PATH_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2042) 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2043) 		if (path_active_desc[i].active != active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2044) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2046) 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2047) 			if (path_state_desc[j].state != state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2048) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2050) 			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2051) 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2052) 					     path_active_desc[i].desc, path_state_desc[j].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2053) 					     fabric->ioa_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2054) 			} else if (fabric->cascaded_expander == 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2055) 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2056) 					     path_active_desc[i].desc, path_state_desc[j].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2057) 					     fabric->ioa_port, fabric->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2058) 			} else if (fabric->phy == 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2059) 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2060) 					     path_active_desc[i].desc, path_state_desc[j].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2061) 					     fabric->ioa_port, fabric->cascaded_expander);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2062) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2063) 				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2064) 					     path_active_desc[i].desc, path_state_desc[j].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2065) 					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2066) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2067) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2068) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2069) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2071) 	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2072) 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2075) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2076)  * ipr_log64_fabric_path - Log a fabric path error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2077)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2078)  * @fabric:		fabric descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2079)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2080)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2081)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2082)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2083) static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2084) 				  struct ipr_hostrcb64_fabric_desc *fabric)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2086) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2087) 	u8 path_state = fabric->path_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2088) 	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2089) 	u8 state = path_state & IPR_PATH_STATE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2090) 	char buffer[IPR_MAX_RES_PATH_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2092) 	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2093) 		if (path_active_desc[i].active != active)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2094) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2096) 		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2097) 			if (path_state_desc[j].state != state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2098) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2100) 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2101) 				     path_active_desc[i].desc, path_state_desc[j].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2102) 				     ipr_format_res_path(hostrcb->ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2103) 						fabric->res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2104) 						buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2105) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2109) 	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2110) 		ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2111) 				    buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2114) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2115) 	u8 type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2116) 	char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2117) } path_type_desc[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2118) 	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2119) 	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2120) 	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2121) 	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2122) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2124) static const struct {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2125) 	u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2126) 	char *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2127) } path_status_desc[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2128) 	{ IPR_PATH_CFG_NO_PROB, "Functional" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2129) 	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2130) 	{ IPR_PATH_CFG_FAILED, "Failed" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2131) 	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2132) 	{ IPR_PATH_NOT_DETECTED, "Missing" },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2133) 	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2134) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2136) static const char *link_rate[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2137) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2138) 	"disabled",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2139) 	"phy reset problem",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2140) 	"spinup hold",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2141) 	"port selector",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2142) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2143) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2144) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2145) 	"1.5Gbps",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2146) 	"3.0Gbps",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2147) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2148) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2149) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2150) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2151) 	"unknown",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2152) 	"unknown"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2153) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2155) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2156)  * ipr_log_path_elem - Log a fabric path element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2157)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2158)  * @cfg:		fabric path element struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2159)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2160)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2161)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2162)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2163) static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2164) 			      struct ipr_hostrcb_config_element *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2166) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2167) 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2168) 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2170) 	if (type == IPR_PATH_CFG_NOT_EXIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2171) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2173) 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2174) 		if (path_type_desc[i].type != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2175) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2177) 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2178) 			if (path_status_desc[j].status != status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2179) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2181) 			if (type == IPR_PATH_CFG_IOA_PORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2182) 				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2183) 					     path_status_desc[j].desc, path_type_desc[i].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2184) 					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2185) 					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2186) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2187) 				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2188) 					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2189) 						     path_status_desc[j].desc, path_type_desc[i].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2190) 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2191) 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2192) 				} else if (cfg->cascaded_expander == 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2193) 					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2194) 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2195) 						     path_type_desc[i].desc, cfg->phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2196) 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2197) 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2198) 				} else if (cfg->phy == 0xff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2199) 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2200) 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2201) 						     path_type_desc[i].desc, cfg->cascaded_expander,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2202) 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2203) 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2204) 				} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2205) 					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2206) 						     "WWN=%08X%08X\n", path_status_desc[j].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2207) 						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2208) 						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2209) 						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2210) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2211) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2212) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2213) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2216) 	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2217) 		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2218) 		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2219) 		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2222) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2223)  * ipr_log64_path_elem - Log a fabric path element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2224)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2225)  * @cfg:		fabric path element struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2226)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2227)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2228)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2229)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2230) static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2231) 				struct ipr_hostrcb64_config_element *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2232) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2233) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2234) 	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2235) 	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2236) 	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2237) 	char buffer[IPR_MAX_RES_PATH_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2239) 	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2240) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2242) 	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2243) 		if (path_type_desc[i].type != type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2244) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2246) 		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2247) 			if (path_status_desc[j].status != status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2248) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2250) 			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2251) 				     path_status_desc[j].desc, path_type_desc[i].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2252) 				     ipr_format_res_path(hostrcb->ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2253) 					cfg->res_path, buffer, sizeof(buffer)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2254) 					link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2255) 					be32_to_cpu(cfg->wwid[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2256) 					be32_to_cpu(cfg->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2257) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2258) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2259) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2260) 	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2261) 		     "WWN=%08X%08X\n", cfg->type_status,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2262) 		     ipr_format_res_path(hostrcb->ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2263) 			cfg->res_path, buffer, sizeof(buffer)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2264) 			link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2265) 			be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2266) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2268) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2269)  * ipr_log_fabric_error - Log a fabric error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2270)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2271)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2272)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2273)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2274)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2275)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2276) static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2277) 				 struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2279) 	struct ipr_hostrcb_type_20_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2280) 	struct ipr_hostrcb_fabric_desc *fabric;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2281) 	struct ipr_hostrcb_config_element *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2282) 	int i, add_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2284) 	error = &hostrcb->hcam.u.error.u.type_20_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2285) 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2286) 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2288) 	add_len = be32_to_cpu(hostrcb->hcam.length) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2289) 		(offsetof(struct ipr_hostrcb_error, u) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2290) 		 offsetof(struct ipr_hostrcb_type_20_error, desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2292) 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2293) 		ipr_log_fabric_path(hostrcb, fabric);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2294) 		for_each_fabric_cfg(fabric, cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2295) 			ipr_log_path_elem(hostrcb, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2297) 		add_len -= be16_to_cpu(fabric->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2298) 		fabric = (struct ipr_hostrcb_fabric_desc *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2299) 			((unsigned long)fabric + be16_to_cpu(fabric->length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2302) 	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2303) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2305) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2306)  * ipr_log_sis64_array_error - Log a sis64 array error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2307)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2308)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2309)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2310)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2311)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2312)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2313) static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2314) 				      struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2315) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2316) 	int i, num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2317) 	struct ipr_hostrcb_type_24_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2318) 	struct ipr_hostrcb64_array_data_entry *array_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2319) 	char buffer[IPR_MAX_RES_PATH_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2320) 	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2322) 	error = &hostrcb->hcam.u.error64.u.type_24_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2324) 	ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2326) 	ipr_err("RAID %s Array Configuration: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2327) 		error->protection_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2328) 		ipr_format_res_path(ioa_cfg, error->last_res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2329) 			buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2331) 	ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2333) 	array_entry = error->array_member;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2334) 	num_entries = min_t(u32, error->num_entries,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2335) 			    ARRAY_SIZE(error->array_member));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2337) 	for (i = 0; i < num_entries; i++, array_entry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2339) 		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2340) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2342) 		if (error->exposed_mode_adn == i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2343) 			ipr_err("Exposed Array Member %d:\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2344) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2345) 			ipr_err("Array Member %d:\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2347) 		ipr_err("Array Member %d:\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2348) 		ipr_log_ext_vpd(&array_entry->vpd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2349) 		ipr_err("Current Location: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2350) 			 ipr_format_res_path(ioa_cfg, array_entry->res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2351) 				buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2352) 		ipr_err("Expected Location: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2353) 			 ipr_format_res_path(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2354) 				array_entry->expected_res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2355) 				buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2357) 		ipr_err_separator;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2358) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2361) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2362)  * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2363)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2364)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2365)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2366)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2367)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2368)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2369) static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2370) 				       struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2371) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2372) 	struct ipr_hostrcb_type_30_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2373) 	struct ipr_hostrcb64_fabric_desc *fabric;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2374) 	struct ipr_hostrcb64_config_element *cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2375) 	int i, add_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2377) 	error = &hostrcb->hcam.u.error64.u.type_30_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2379) 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2380) 	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2382) 	add_len = be32_to_cpu(hostrcb->hcam.length) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2383) 		(offsetof(struct ipr_hostrcb64_error, u) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2384) 		 offsetof(struct ipr_hostrcb_type_30_error, desc));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2386) 	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2387) 		ipr_log64_fabric_path(hostrcb, fabric);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2388) 		for_each_fabric_cfg(fabric, cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2389) 			ipr_log64_path_elem(hostrcb, cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2391) 		add_len -= be16_to_cpu(fabric->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2392) 		fabric = (struct ipr_hostrcb64_fabric_desc *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2393) 			((unsigned long)fabric + be16_to_cpu(fabric->length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2396) 	ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2397) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2399) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2400)  * ipr_log_sis64_service_required_error - Log a sis64 service required error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2401)  * @ioa_cfg:    ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2402)  * @hostrcb:    hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2403)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2404)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2405)  *      none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2406)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2407) static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2408) 				       struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2410) 	struct ipr_hostrcb_type_41_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2412) 	error = &hostrcb->hcam.u.error64.u.type_41_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2414) 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2415) 	ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2416) 	ipr_log_hex_data(ioa_cfg, error->data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2417) 			 be32_to_cpu(hostrcb->hcam.length) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2418) 			 (offsetof(struct ipr_hostrcb_error, u) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2419) 			  offsetof(struct ipr_hostrcb_type_41_error, data)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2421) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2422)  * ipr_log_generic_error - Log an adapter error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2423)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2424)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2425)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2426)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2427)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2428)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2429) static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2430) 				  struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2432) 	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2433) 			 be32_to_cpu(hostrcb->hcam.length));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2436) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2437)  * ipr_log_sis64_device_error - Log a cache error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2438)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2439)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2440)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2441)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2442)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2443)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2444) static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2445) 					 struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2446) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2447) 	struct ipr_hostrcb_type_21_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2448) 	char buffer[IPR_MAX_RES_PATH_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2450) 	error = &hostrcb->hcam.u.error64.u.type_21_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2452) 	ipr_err("-----Failing Device Information-----\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2453) 	ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2454) 		be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2455) 		 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2456) 	ipr_err("Device Resource Path: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2457) 		__ipr_format_res_path(error->res_path,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2458) 				      buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2459) 	error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2460) 	error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2461) 	ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2462) 	ipr_err("Secondary Problem Description:  %s\n", error->second_problem_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2463) 	ipr_err("SCSI Sense Data:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2464) 	ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2465) 	ipr_err("SCSI Command Descriptor Block: \n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2466) 	ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2468) 	ipr_err("Additional IOA Data:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2469) 	ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2472) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2473)  * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2474)  * @ioasc:	IOASC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2475)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2476)  * This function will return the index of into the ipr_error_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2477)  * for the specified IOASC. If the IOASC is not in the table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2478)  * 0 will be returned, which points to the entry used for unknown errors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2479)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2480)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2481)  * 	index into the ipr_error_table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2482)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2483) static u32 ipr_get_error(u32 ioasc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2485) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2487) 	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2488) 		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2489) 			return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2491) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2492) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2494) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2495)  * ipr_handle_log_data - Log an adapter error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2496)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2497)  * @hostrcb:	hostrcb struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2498)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2499)  * This function logs an adapter error to the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2500)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2501)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2502)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2503)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2504) static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2505) 				struct ipr_hostrcb *hostrcb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2507) 	u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2508) 	int error_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2509) 	struct ipr_hostrcb_type_21_error *error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2511) 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2512) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2514) 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2515) 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2517) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2518) 		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2519) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2520) 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2522) 	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2523) 	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2524) 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2525) 		scsi_report_bus_reset(ioa_cfg->host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2526) 				      hostrcb->hcam.u.error.fd_res_addr.bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2527) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2529) 	error_index = ipr_get_error(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2531) 	if (!ipr_error_table[error_index].log_hcam)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2532) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2534) 	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2535) 	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2536) 		error = &hostrcb->hcam.u.error64.u.type_21_error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2538) 		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2539) 			ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2540) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2541) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2543) 	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2545) 	/* Set indication we have logged an error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2546) 	ioa_cfg->errors_logged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2548) 	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2549) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2550) 	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2551) 		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2553) 	switch (hostrcb->hcam.overlay_id) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2554) 	case IPR_HOST_RCB_OVERLAY_ID_2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2555) 		ipr_log_cache_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2556) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2557) 	case IPR_HOST_RCB_OVERLAY_ID_3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2558) 		ipr_log_config_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2559) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2560) 	case IPR_HOST_RCB_OVERLAY_ID_4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2561) 	case IPR_HOST_RCB_OVERLAY_ID_6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2562) 		ipr_log_array_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2563) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2564) 	case IPR_HOST_RCB_OVERLAY_ID_7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2565) 		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2566) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2567) 	case IPR_HOST_RCB_OVERLAY_ID_12:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2568) 		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2569) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2570) 	case IPR_HOST_RCB_OVERLAY_ID_13:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2571) 		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2572) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2573) 	case IPR_HOST_RCB_OVERLAY_ID_14:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2574) 	case IPR_HOST_RCB_OVERLAY_ID_16:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2575) 		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2576) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2577) 	case IPR_HOST_RCB_OVERLAY_ID_17:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2578) 		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2579) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2580) 	case IPR_HOST_RCB_OVERLAY_ID_20:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2581) 		ipr_log_fabric_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2582) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2583) 	case IPR_HOST_RCB_OVERLAY_ID_21:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2584) 		ipr_log_sis64_device_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2585) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2586) 	case IPR_HOST_RCB_OVERLAY_ID_23:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2587) 		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2588) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2589) 	case IPR_HOST_RCB_OVERLAY_ID_24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2590) 	case IPR_HOST_RCB_OVERLAY_ID_26:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2591) 		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2592) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2593) 	case IPR_HOST_RCB_OVERLAY_ID_30:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2594) 		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2595) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2596) 	case IPR_HOST_RCB_OVERLAY_ID_41:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2597) 		ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2598) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2599) 	case IPR_HOST_RCB_OVERLAY_ID_1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2600) 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2601) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2602) 		ipr_log_generic_error(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2603) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2605) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2607) static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2609) 	struct ipr_hostrcb *hostrcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2611) 	hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2612) 					struct ipr_hostrcb, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2614) 	if (unlikely(!hostrcb)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2615) 		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2616) 		hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2617) 						struct ipr_hostrcb, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2620) 	list_del_init(&hostrcb->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2621) 	return hostrcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2622) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2624) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2625)  * ipr_process_error - Op done function for an adapter error log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2626)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2627)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2628)  * This function is the op done function for an error log host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2629)  * controlled async from the adapter. It will log the error and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2630)  * send the HCAM back to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2631)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2632)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2633)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2634)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2635) static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2637) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2638) 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2639) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2640) 	u32 fd_ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2642) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2643) 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2644) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2645) 		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2646) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2647) 	list_del_init(&hostrcb->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2648) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2650) 	if (!ioasc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2651) 		ipr_handle_log_data(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2652) 		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2653) 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2654) 	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2655) 		   ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2656) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2657) 			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2660) 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2661) 	schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2662) 	hostrcb = ipr_get_free_hostrcb(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2664) 	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2667) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2668)  * ipr_timeout -  An internally generated op has timed out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2669)  * @t: Timer context used to fetch ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2670)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2671)  * This function blocks host requests and initiates an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2672)  * adapter reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2673)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2674)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2675)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2676)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2677) static void ipr_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2679) 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2680) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2681) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2683) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2684) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2686) 	ioa_cfg->errors_logged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2687) 	dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2688) 		"Adapter being reset due to command timeout.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2689) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2690) 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2691) 		ioa_cfg->sdt_state = GET_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2693) 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2694) 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2696) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2697) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2700) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2701)  * ipr_oper_timeout -  Adapter timed out transitioning to operational
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2702)  * @t: Timer context used to fetch ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2703)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2704)  * This function blocks host requests and initiates an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2705)  * adapter reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2706)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2707)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2708)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2709)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2710) static void ipr_oper_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2712) 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2713) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2714) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2716) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2717) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2718) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2719) 	ioa_cfg->errors_logged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2720) 	dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2721) 		"Adapter timed out transitioning to operational.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2723) 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2724) 		ioa_cfg->sdt_state = GET_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2726) 	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2727) 		if (ipr_fastfail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2728) 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2729) 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2732) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2733) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2736) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2737)  * ipr_find_ses_entry - Find matching SES in SES table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2738)  * @res:	resource entry struct of SES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2739)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2740)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2741)  * 	pointer to SES table entry / NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2742)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2743) static const struct ipr_ses_table_entry *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2744) ipr_find_ses_entry(struct ipr_resource_entry *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2746) 	int i, j, matches;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2747) 	struct ipr_std_inq_vpids *vpids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2748) 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2750) 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2751) 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2752) 			if (ste->compare_product_id_byte[j] == 'X') {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2753) 				vpids = &res->std_inq_data.vpids;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2754) 				if (vpids->product_id[j] == ste->product_id[j])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2755) 					matches++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2756) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2757) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2758) 			} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2759) 				matches++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2760) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2762) 		if (matches == IPR_PROD_ID_LEN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2763) 			return ste;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2764) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2766) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2769) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2770)  * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2771)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2772)  * @bus:		SCSI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2773)  * @bus_width:	bus width
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2774)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2775)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2776)  *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2777)  *	For a 2-byte wide SCSI bus, the maximum transfer speed is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2778)  *	twice the maximum transfer rate (e.g. for a wide enabled bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2779)  *	max 160MHz = max 320MB/sec).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2780)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2781) static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2782) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2783) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2784) 	const struct ipr_ses_table_entry *ste;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2785) 	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2787) 	/* Loop through each config table entry in the config table buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2788) 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2789) 		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2790) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2792) 		if (bus != res->bus)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2793) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2795) 		if (!(ste = ipr_find_ses_entry(res)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2796) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2798) 		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2801) 	return max_xfer_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2804) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2805)  * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2806)  * @ioa_cfg:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2807)  * @max_delay:		max delay in micro-seconds to wait
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2808)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2809)  * Waits for an IODEBUG ACK from the IOA, doing busy looping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2811)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2812)  * 	0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2813)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2814) static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2816) 	volatile u32 pcii_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2817) 	int delay = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2819) 	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2820) 	while (delay < max_delay) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2821) 		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2823) 		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2824) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2826) 		/* udelay cannot be used if delay is more than a few milliseconds */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2827) 		if ((delay / 1000) > MAX_UDELAY_MS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2828) 			mdelay(delay / 1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2829) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2830) 			udelay(delay);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2832) 		delay += delay;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2833) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2834) 	return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2837) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2838)  * ipr_get_sis64_dump_data_section - Dump IOA memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2839)  * @ioa_cfg:			ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2840)  * @start_addr:			adapter address to dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2841)  * @dest:			destination kernel buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2842)  * @length_in_words:		length to dump in 4 byte words
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2843)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2844)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2845)  * 	0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2846)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2847) static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2848) 					   u32 start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2849) 					   __be32 *dest, u32 length_in_words)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2851) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2853) 	for (i = 0; i < length_in_words; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2854) 		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2855) 		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2856) 		dest++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2857) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2859) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2862) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2863)  * ipr_get_ldump_data_section - Dump IOA memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2864)  * @ioa_cfg:			ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2865)  * @start_addr:			adapter address to dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2866)  * @dest:				destination kernel buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2867)  * @length_in_words:	length to dump in 4 byte words
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2868)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2869)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2870)  * 	0 on success / -EIO on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2871)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2872) static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2873) 				      u32 start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2874) 				      __be32 *dest, u32 length_in_words)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2876) 	volatile u32 temp_pcii_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2877) 	int i, delay = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2879) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2880) 		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2881) 						       dest, length_in_words);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2883) 	/* Write IOA interrupt reg starting LDUMP state  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2884) 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2885) 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2887) 	/* Wait for IO debug acknowledge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2888) 	if (ipr_wait_iodbg_ack(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2889) 			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2890) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2891) 			"IOA dump long data transfer timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2892) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2895) 	/* Signal LDUMP interlocked - clear IO debug ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2896) 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2897) 	       ioa_cfg->regs.clr_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2899) 	/* Write Mailbox with starting address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2900) 	writel(start_addr, ioa_cfg->ioa_mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2902) 	/* Signal address valid - clear IOA Reset alert */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2903) 	writel(IPR_UPROCI_RESET_ALERT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2904) 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2906) 	for (i = 0; i < length_in_words; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2907) 		/* Wait for IO debug acknowledge */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2908) 		if (ipr_wait_iodbg_ack(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2909) 				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2910) 			dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2911) 				"IOA dump short data transfer timeout\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2912) 			return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2913) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2915) 		/* Read data from mailbox and increment destination pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2916) 		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2917) 		dest++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2919) 		/* For all but the last word of data, signal data received */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2920) 		if (i < (length_in_words - 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2921) 			/* Signal dump data received - Clear IO debug Ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2922) 			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2923) 			       ioa_cfg->regs.clr_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2924) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2925) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2927) 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2928) 	writel(IPR_UPROCI_RESET_ALERT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2929) 	       ioa_cfg->regs.set_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2931) 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2932) 	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2934) 	/* Signal dump data received - Clear IO debug Ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2935) 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2936) 	       ioa_cfg->regs.clr_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2938) 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2939) 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2940) 		temp_pcii_reg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2941) 		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2943) 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2944) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2946) 		udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2947) 		delay += 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2950) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2953) #ifdef CONFIG_SCSI_IPR_DUMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2954) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2955)  * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2956)  * @ioa_cfg:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2957)  * @pci_address:	adapter address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2958)  * @length:			length of data to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2959)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2960)  * Copy data from PCI adapter to kernel buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2961)  * Note: length MUST be a 4 byte multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2962)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2963)  * 	0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2964)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2965) static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2966) 			unsigned long pci_address, u32 length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2968) 	int bytes_copied = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2969) 	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2970) 	__be32 *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2971) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2972) 	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2974) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2975) 		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2976) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2977) 		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2978) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2979) 	while (bytes_copied < length &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2980) 	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2981) 		if (ioa_dump->page_offset >= PAGE_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2982) 		    ioa_dump->page_offset == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2983) 			page = (__be32 *)__get_free_page(GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2985) 			if (!page) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2986) 				ipr_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2987) 				return bytes_copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2988) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2990) 			ioa_dump->page_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2991) 			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2992) 			ioa_dump->next_page_index++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2993) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2994) 			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2996) 		rem_len = length - bytes_copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2997) 		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2998) 		cur_len = min(rem_len, rem_page_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  2999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3000) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3001) 		if (ioa_cfg->sdt_state == ABORT_DUMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3002) 			rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3003) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3004) 			rc = ipr_get_ldump_data_section(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3005) 							pci_address + bytes_copied,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3006) 							&page[ioa_dump->page_offset / 4],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3007) 							(cur_len / sizeof(u32)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3008) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3009) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3011) 		if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3012) 			ioa_dump->page_offset += cur_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3013) 			bytes_copied += cur_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3014) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3015) 			ipr_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3016) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3017) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3018) 		schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3019) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3021) 	return bytes_copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3024) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3025)  * ipr_init_dump_entry_hdr - Initialize a dump entry header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3026)  * @hdr:	dump entry header struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3027)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3028)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3029)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3030)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3031) static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3033) 	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3034) 	hdr->num_elems = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3035) 	hdr->offset = sizeof(*hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3036) 	hdr->status = IPR_DUMP_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3037) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3039) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3040)  * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3041)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3042)  * @driver_dump:	driver dump struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3043)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3044)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3045)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3046)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3047) static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3048) 				   struct ipr_driver_dump *driver_dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3050) 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3052) 	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3053) 	driver_dump->ioa_type_entry.hdr.len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3054) 		sizeof(struct ipr_dump_ioa_type_entry) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3055) 		sizeof(struct ipr_dump_entry_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3056) 	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3057) 	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3058) 	driver_dump->ioa_type_entry.type = ioa_cfg->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3059) 	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3060) 		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3061) 		ucode_vpd->minor_release[1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3062) 	driver_dump->hdr.num_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3063) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3065) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3066)  * ipr_dump_version_data - Fill in the driver version in the dump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3067)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3068)  * @driver_dump:	driver dump struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3069)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3070)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3071)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3072)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3073) static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3074) 				  struct ipr_driver_dump *driver_dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3076) 	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3077) 	driver_dump->version_entry.hdr.len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3078) 		sizeof(struct ipr_dump_version_entry) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3079) 		sizeof(struct ipr_dump_entry_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3080) 	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3081) 	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3082) 	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3083) 	driver_dump->hdr.num_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3086) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3087)  * ipr_dump_trace_data - Fill in the IOA trace in the dump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3088)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3089)  * @driver_dump:	driver dump struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3090)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3091)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3092)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3093)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3094) static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3095) 				   struct ipr_driver_dump *driver_dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3097) 	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3098) 	driver_dump->trace_entry.hdr.len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3099) 		sizeof(struct ipr_dump_trace_entry) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3100) 		sizeof(struct ipr_dump_entry_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3101) 	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3102) 	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3103) 	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3104) 	driver_dump->hdr.num_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3105) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3107) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3108)  * ipr_dump_location_data - Fill in the IOA location in the dump.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3109)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3110)  * @driver_dump:	driver dump struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3111)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3112)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3113)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3114)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3115) static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3116) 				   struct ipr_driver_dump *driver_dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3118) 	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3119) 	driver_dump->location_entry.hdr.len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3120) 		sizeof(struct ipr_dump_location_entry) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3121) 		sizeof(struct ipr_dump_entry_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3122) 	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3123) 	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3124) 	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3125) 	driver_dump->hdr.num_entries++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3126) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3128) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3129)  * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3130)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3131)  * @dump:		dump struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3132)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3133)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3134)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3135)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3136) static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3138) 	unsigned long start_addr, sdt_word;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3139) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3140) 	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3141) 	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3142) 	u32 num_entries, max_num_entries, start_off, end_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3143) 	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3144) 	struct ipr_sdt *sdt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3145) 	int valid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3146) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3148) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3150) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3152) 	if (ioa_cfg->sdt_state != READ_DUMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3153) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3154) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3155) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3157) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3158) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3159) 		ssleep(IPR_DUMP_DELAY_SECONDS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3160) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3161) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3163) 	start_addr = readl(ioa_cfg->ioa_mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3165) 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3166) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3167) 			"Invalid dump table format: %lx\n", start_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3168) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3169) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3170) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3172) 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3174) 	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3176) 	/* Initialize the overall dump header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3177) 	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3178) 	driver_dump->hdr.num_entries = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3179) 	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3180) 	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3181) 	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3182) 	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3184) 	ipr_dump_version_data(ioa_cfg, driver_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3185) 	ipr_dump_location_data(ioa_cfg, driver_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3186) 	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3187) 	ipr_dump_trace_data(ioa_cfg, driver_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3189) 	/* Update dump_header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3190) 	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3192) 	/* IOA Dump entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3193) 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3194) 	ioa_dump->hdr.len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3195) 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3196) 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3198) 	/* First entries in sdt are actually a list of dump addresses and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3199) 	 lengths to gather the real dump data.  sdt represents the pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3200) 	 to the ioa generated dump table.  Dump data will be extracted based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3201) 	 on entries in this table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3202) 	sdt = &ioa_dump->sdt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3204) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3205) 		max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3206) 		max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3207) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3208) 		max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3209) 		max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3212) 	bytes_to_copy = offsetof(struct ipr_sdt, entry) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3213) 			(max_num_entries * sizeof(struct ipr_sdt_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3214) 	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3215) 					bytes_to_copy / sizeof(__be32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3217) 	/* Smart Dump table is ready to use and the first entry is valid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3218) 	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3219) 	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3220) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3221) 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3222) 			rc, be32_to_cpu(sdt->hdr.state));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3223) 		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3224) 		ioa_cfg->sdt_state = DUMP_OBTAINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3225) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3226) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3229) 	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3231) 	if (num_entries > max_num_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3232) 		num_entries = max_num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3234) 	/* Update dump length to the actual data to be copied */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3235) 	dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3236) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3237) 		dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3238) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3239) 		dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3241) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3243) 	for (i = 0; i < num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3244) 		if (ioa_dump->hdr.len > max_dump_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3245) 			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3246) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3247) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3249) 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3250) 			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3251) 			if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3252) 				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3253) 			else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3254) 				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3255) 				end_off = be32_to_cpu(sdt->entry[i].end_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3257) 				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3258) 					bytes_to_copy = end_off - start_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3259) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3260) 					valid = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3261) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3262) 			if (valid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3263) 				if (bytes_to_copy > max_dump_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3264) 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3265) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3266) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3268) 				/* Copy data from adapter to driver buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3269) 				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3270) 							    bytes_to_copy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3272) 				ioa_dump->hdr.len += bytes_copied;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3274) 				if (bytes_copied != bytes_to_copy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3275) 					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3276) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3277) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3278) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3279) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3280) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3282) 	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3284) 	/* Update dump_header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3285) 	driver_dump->hdr.len += ioa_dump->hdr.len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3286) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3287) 	ioa_cfg->sdt_state = DUMP_OBTAINED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3288) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3291) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3292) #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3293) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3295) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3296)  * ipr_release_dump - Free adapter dump memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3297)  * @kref:	kref struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3298)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3299)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3300)  *	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3301)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3302) static void ipr_release_dump(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3304) 	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3305) 	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3306) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3307) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3309) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3310) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3311) 	ioa_cfg->dump = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3312) 	ioa_cfg->sdt_state = INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3313) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3315) 	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3316) 		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3318) 	vfree(dump->ioa_dump.ioa_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3319) 	kfree(dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3320) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3323) static void ipr_add_remove_thread(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3325) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3326) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3327) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3328) 	struct ipr_ioa_cfg *ioa_cfg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3329) 		container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3330) 	u8 bus, target, lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3331) 	int did_work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3333) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3334) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3336) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3337) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3338) 		did_work = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3339) 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3340) 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3341) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3342) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3344) 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3345) 			if (res->del_from_ml && res->sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3346) 				did_work = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3347) 				sdev = res->sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3348) 				if (!scsi_device_get(sdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3349) 					if (!res->add_to_ml)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3350) 						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3351) 					else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3352) 						res->del_from_ml = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3353) 					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3354) 					scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3355) 					scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3356) 					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3357) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3358) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3359) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3360) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3361) 	} while (did_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3363) 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3364) 		if (res->add_to_ml) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3365) 			bus = res->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3366) 			target = res->target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3367) 			lun = res->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3368) 			res->add_to_ml = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3369) 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3370) 			scsi_add_device(ioa_cfg->host, bus, target, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3371) 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3372) 			goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3373) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3376) 	ioa_cfg->scan_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3377) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3378) 	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3379) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3380) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3382) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3383)  * ipr_worker_thread - Worker thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3384)  * @work:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3385)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3386)  * Called at task level from a work thread. This function takes care
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3387)  * of adding and removing device from the mid-layer as configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3388)  * changes are detected by the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3389)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3390)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3391)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3392)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3393) static void ipr_worker_thread(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3394) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3395) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3396) 	struct ipr_dump *dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3397) 	struct ipr_ioa_cfg *ioa_cfg =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3398) 		container_of(work, struct ipr_ioa_cfg, work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3400) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3401) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3403) 	if (ioa_cfg->sdt_state == READ_DUMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3404) 		dump = ioa_cfg->dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3405) 		if (!dump) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3406) 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3407) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3408) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3409) 		kref_get(&dump->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3410) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3411) 		ipr_get_ioa_dump(ioa_cfg, dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3412) 		kref_put(&dump->kref, ipr_release_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3414) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3415) 		if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3416) 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3417) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3418) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3419) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3421) 	if (ioa_cfg->scsi_unblock) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3422) 		ioa_cfg->scsi_unblock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3423) 		ioa_cfg->scsi_blocked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3424) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3425) 		scsi_unblock_requests(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3426) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3427) 		if (ioa_cfg->scsi_blocked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3428) 			scsi_block_requests(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3429) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3431) 	if (!ioa_cfg->scan_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3432) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3433) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3434) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3436) 	schedule_work(&ioa_cfg->scsi_add_work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3438) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3439) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3442) #ifdef CONFIG_SCSI_IPR_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3443) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3444)  * ipr_read_trace - Dump the adapter trace
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3445)  * @filp:		open sysfs file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3446)  * @kobj:		kobject struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3447)  * @bin_attr:		bin_attribute struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3448)  * @buf:		buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3449)  * @off:		offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3450)  * @count:		buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3451)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3452)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3453)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3454)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3455) static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3456) 			      struct bin_attribute *bin_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3457) 			      char *buf, loff_t off, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3459) 	struct device *dev = container_of(kobj, struct device, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3460) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3461) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3462) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3463) 	ssize_t ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3465) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3466) 	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3467) 				IPR_TRACE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3468) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3470) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3471) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3473) static struct bin_attribute ipr_trace_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3474) 	.attr =	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3475) 		.name = "trace",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3476) 		.mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3477) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3478) 	.size = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3479) 	.read = ipr_read_trace,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3480) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3481) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3483) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3484)  * ipr_show_fw_version - Show the firmware version
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3485)  * @dev:	class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3486)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3487)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3488)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3489)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3490)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3491)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3492) static ssize_t ipr_show_fw_version(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3493) 				   struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3494) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3495) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3496) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3497) 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3498) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3499) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3501) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3502) 	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3503) 		       ucode_vpd->major_release, ucode_vpd->card_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3504) 		       ucode_vpd->minor_release[0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3505) 		       ucode_vpd->minor_release[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3506) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3507) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3510) static struct device_attribute ipr_fw_version_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3511) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3512) 		.name =		"fw_version",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3513) 		.mode =		S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3514) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3515) 	.show = ipr_show_fw_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3516) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3518) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3519)  * ipr_show_log_level - Show the adapter's error logging level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3520)  * @dev:	class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3521)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3522)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3523)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3524)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3525)  * 	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3526)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3527) static ssize_t ipr_show_log_level(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3528) 				   struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3530) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3531) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3532) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3533) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3535) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3536) 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3537) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3538) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3541) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3542)  * ipr_store_log_level - Change the adapter's error logging level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3543)  * @dev:	class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3544)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3545)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3546)  * @count:	buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3547)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3548)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3549)  * 	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3550)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3551) static ssize_t ipr_store_log_level(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3552) 				   struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3553) 				   const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3555) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3556) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3557) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3559) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3560) 	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3561) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3562) 	return strlen(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3563) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3565) static struct device_attribute ipr_log_level_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3566) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3567) 		.name =		"log_level",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3568) 		.mode =		S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3569) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3570) 	.show = ipr_show_log_level,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3571) 	.store = ipr_store_log_level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3572) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3574) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3575)  * ipr_store_diagnostics - IOA Diagnostics interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3576)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3577)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3578)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3579)  * @count:	buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3580)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3581)  * This function will reset the adapter and wait a reasonable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3582)  * amount of time for any errors that the adapter might log.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3583)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3584)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3585)  * 	count on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3586)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3587) static ssize_t ipr_store_diagnostics(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3588) 				     struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3589) 				     const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3590) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3591) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3592) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3593) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3594) 	int rc = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3596) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3597) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3599) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3600) 	while (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3601) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3602) 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3603) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3604) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3606) 	ioa_cfg->errors_logged = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3607) 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3609) 	if (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3610) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3611) 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3612) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3613) 		/* Wait for a second for any errors to be logged */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3614) 		msleep(1000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3615) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3616) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3617) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3618) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3619) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3620) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3621) 	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3622) 		rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3623) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3625) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3628) static struct device_attribute ipr_diagnostics_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3629) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3630) 		.name =		"run_diagnostics",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3631) 		.mode =		S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3632) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3633) 	.store = ipr_store_diagnostics
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3634) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3636) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3637)  * ipr_show_adapter_state - Show the adapter's state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3638)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3639)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3640)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3641)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3642)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3643)  * 	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3644)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3645) static ssize_t ipr_show_adapter_state(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3646) 				      struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3648) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3649) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3650) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3651) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3653) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3654) 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3655) 		len = snprintf(buf, PAGE_SIZE, "offline\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3656) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3657) 		len = snprintf(buf, PAGE_SIZE, "online\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3658) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3659) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3660) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3661) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3662) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3663)  * ipr_store_adapter_state - Change adapter state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3664)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3665)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3666)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3667)  * @count:	buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3668)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3669)  * This function will change the adapter's state.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3670)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3671)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3672)  * 	count on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3673)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3674) static ssize_t ipr_store_adapter_state(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3675) 				       struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3676) 				       const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3677) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3678) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3679) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3680) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3681) 	int result = count, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3683) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3684) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3686) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3687) 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3688) 	    !strncmp(buf, "online", 6)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3689) 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3690) 			spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3691) 			ioa_cfg->hrrq[i].ioa_is_dead = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3692) 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3693) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3694) 		wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3695) 		ioa_cfg->reset_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3696) 		ioa_cfg->in_ioa_bringdown = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3697) 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3698) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3699) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3700) 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3701) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3702) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3703) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3705) static struct device_attribute ipr_ioa_state_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3706) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3707) 		.name =		"online_state",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3708) 		.mode =		S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3709) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3710) 	.show = ipr_show_adapter_state,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3711) 	.store = ipr_store_adapter_state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3712) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3714) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3715)  * ipr_store_reset_adapter - Reset the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3716)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3717)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3718)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3719)  * @count:	buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3720)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3721)  * This function will reset the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3722)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3723)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3724)  * 	count on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3725)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3726) static ssize_t ipr_store_reset_adapter(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3727) 				       struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3728) 				       const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3730) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3731) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3732) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3733) 	int result = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3735) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3736) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3738) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3739) 	if (!ioa_cfg->in_reset_reload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3740) 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3741) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3742) 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3744) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3747) static struct device_attribute ipr_ioa_reset_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3748) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3749) 		.name =		"reset_host",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3750) 		.mode =		S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3751) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3752) 	.store = ipr_store_reset_adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3753) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3755) static int ipr_iopoll(struct irq_poll *iop, int budget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3756)  /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3757)  * ipr_show_iopoll_weight - Show ipr polling mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3758)  * @dev:	class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3759)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3760)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3761)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3762)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3763)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3764)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3765) static ssize_t ipr_show_iopoll_weight(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3766) 				   struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3768) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3769) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3770) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3771) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3773) 	spin_lock_irqsave(shost->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3774) 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3775) 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3777) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3780) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3781)  * ipr_store_iopoll_weight - Change the adapter's polling mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3782)  * @dev:	class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3783)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3784)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3785)  * @count:	buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3786)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3787)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3788)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3789)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3790) static ssize_t ipr_store_iopoll_weight(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3791) 					struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3792) 					const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3793) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3794) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3795) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3796) 	unsigned long user_iopoll_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3797) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3798) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3800) 	if (!ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3801) 		dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3802) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3804) 	if (kstrtoul(buf, 10, &user_iopoll_weight))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3805) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3807) 	if (user_iopoll_weight > 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3808) 		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3809) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3810) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3812) 	if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3813) 		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3814) 		return strlen(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3815) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3817) 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3818) 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3819) 			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3822) 	spin_lock_irqsave(shost->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3823) 	ioa_cfg->iopoll_weight = user_iopoll_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3824) 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3825) 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3826) 			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3827) 					ioa_cfg->iopoll_weight, ipr_iopoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3828) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3830) 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3832) 	return strlen(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3833) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3835) static struct device_attribute ipr_iopoll_weight_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3836) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3837) 		.name =		"iopoll_weight",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3838) 		.mode =		S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3839) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3840) 	.show = ipr_show_iopoll_weight,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3841) 	.store = ipr_store_iopoll_weight
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3842) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3844) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3845)  * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3846)  * @buf_len:		buffer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3847)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3848)  * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3849)  * list to use for microcode download
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3850)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3851)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3852)  * 	pointer to sglist / NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3853)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3854) static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3856) 	int sg_size, order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3857) 	struct ipr_sglist *sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3859) 	/* Get the minimum size per scatter/gather element */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3860) 	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3862) 	/* Get the actual size per element */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3863) 	order = get_order(sg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3865) 	/* Allocate a scatter/gather list for the DMA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3866) 	sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3867) 	if (sglist == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3868) 		ipr_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3869) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3871) 	sglist->order = order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3872) 	sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3873) 					      &sglist->num_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3874) 	if (!sglist->scatterlist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3875) 		kfree(sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3876) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3879) 	return sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3880) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3882) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3883)  * ipr_free_ucode_buffer - Frees a microcode download buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3884)  * @sglist:		scatter/gather list pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3885)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3886)  * Free a DMA'able ucode download buffer previously allocated with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3887)  * ipr_alloc_ucode_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3888)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3889)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3890)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3891)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3892) static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3894) 	sgl_free_order(sglist->scatterlist, sglist->order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3895) 	kfree(sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3898) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3899)  * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3900)  * @sglist:		scatter/gather list pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3901)  * @buffer:		buffer pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3902)  * @len:		buffer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3903)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3904)  * Copy a microcode image from a user buffer into a buffer allocated by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3905)  * ipr_alloc_ucode_buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3906)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3907)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3908)  * 	0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3909)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3910) static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3911) 				 u8 *buffer, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3912) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3913) 	int bsize_elem, i, result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3914) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3915) 	void *kaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3917) 	/* Determine the actual number of bytes per element */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3918) 	bsize_elem = PAGE_SIZE * (1 << sglist->order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3920) 	sg = sglist->scatterlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3922) 	for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3923) 			buffer += bsize_elem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3924) 		struct page *page = sg_page(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3926) 		kaddr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3927) 		memcpy(kaddr, buffer, bsize_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3928) 		kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3930) 		sg->length = bsize_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3932) 		if (result != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3933) 			ipr_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3934) 			return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3935) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3936) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3938) 	if (len % bsize_elem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3939) 		struct page *page = sg_page(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3941) 		kaddr = kmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3942) 		memcpy(kaddr, buffer, len % bsize_elem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3943) 		kunmap(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3945) 		sg->length = len % bsize_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3948) 	sglist->buffer_len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3949) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3950) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3952) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3953)  * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3954)  * @ipr_cmd:		ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3955)  * @sglist:		scatter/gather list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3956)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3957)  * Builds a microcode download IOA data list (IOADL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3958)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3959)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3960) static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3961) 				    struct ipr_sglist *sglist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3963) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3964) 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3965) 	struct scatterlist *scatterlist = sglist->scatterlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3966) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3967) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3969) 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3970) 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3971) 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3973) 	ioarcb->ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3974) 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3975) 	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3976) 		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3977) 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3978) 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3979) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3981) 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3982) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3984) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3985)  * ipr_build_ucode_ioadl - Build a microcode download IOADL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3986)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3987)  * @sglist:		scatter/gather list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3988)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3989)  * Builds a microcode download IOA data list (IOADL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3990)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3991)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3992) static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3993) 				  struct ipr_sglist *sglist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3994) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3995) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3996) 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3997) 	struct scatterlist *scatterlist = sglist->scatterlist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3998) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  3999) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4001) 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4002) 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4003) 	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4005) 	ioarcb->ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4006) 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4008) 	for_each_sg(scatterlist, sg, ipr_cmd->dma_use_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4009) 		ioadl[i].flags_and_data_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4010) 			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4011) 		ioadl[i].address =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4012) 			cpu_to_be32(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4015) 	ioadl[i-1].flags_and_data_len |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4016) 		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4019) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4020)  * ipr_update_ioa_ucode - Update IOA's microcode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4021)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4022)  * @sglist:		scatter/gather list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4023)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4024)  * Initiate an adapter reset to update the IOA's microcode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4025)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4026)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4027)  * 	0 on success / -EIO on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4028)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4029) static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4030) 				struct ipr_sglist *sglist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4032) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4034) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4035) 	while (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4036) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4037) 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4038) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4039) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4041) 	if (ioa_cfg->ucode_sglist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4042) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4043) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4044) 			"Microcode download already in progress\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4045) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4046) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4048) 	sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4049) 					sglist->scatterlist, sglist->num_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4050) 					DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4052) 	if (!sglist->num_dma_sg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4053) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4054) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4055) 			"Failed to map microcode download buffer!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4056) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4057) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4059) 	ioa_cfg->ucode_sglist = sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4060) 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4061) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4062) 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4064) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4065) 	ioa_cfg->ucode_sglist = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4066) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4067) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4068) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4070) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4071)  * ipr_store_update_fw - Update the firmware on the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4072)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4073)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4074)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4075)  * @count:	buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4076)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4077)  * This function will update the firmware on the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4078)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4079)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4080)  * 	count on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4081)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4082) static ssize_t ipr_store_update_fw(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4083) 				   struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4084) 				   const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4086) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4087) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4088) 	struct ipr_ucode_image_header *image_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4089) 	const struct firmware *fw_entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4090) 	struct ipr_sglist *sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4091) 	char fname[100];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4092) 	char *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4093) 	char *endline;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4094) 	int result, dnld_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4096) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4097) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4099) 	snprintf(fname, sizeof(fname), "%s", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4101) 	endline = strchr(fname, '\n');
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4102) 	if (endline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4103) 		*endline = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4105) 	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4106) 		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4107) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4108) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4110) 	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4112) 	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4113) 	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4114) 	sglist = ipr_alloc_ucode_buffer(dnld_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4116) 	if (!sglist) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4117) 		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4118) 		release_firmware(fw_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4119) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4120) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4122) 	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4124) 	if (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4125) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4126) 			"Microcode buffer copy to DMA buffer failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4127) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4128) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4130) 	ipr_info("Updating microcode, please be patient.  This may take up to 30 minutes.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4132) 	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4134) 	if (!result)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4135) 		result = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4136) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4137) 	ipr_free_ucode_buffer(sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4138) 	release_firmware(fw_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4139) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4142) static struct device_attribute ipr_update_fw_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4143) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4144) 		.name =		"update_fw",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4145) 		.mode =		S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4146) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4147) 	.store = ipr_store_update_fw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4148) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4150) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4151)  * ipr_show_fw_type - Show the adapter's firmware type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4152)  * @dev:	class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4153)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4154)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4155)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4156)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4157)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4158)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4159) static ssize_t ipr_show_fw_type(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4160) 				struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4162) 	struct Scsi_Host *shost = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4163) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4164) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4165) 	int len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4167) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4168) 	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4169) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4170) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4171) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4173) static struct device_attribute ipr_ioa_fw_type_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4174) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4175) 		.name =		"fw_type",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4176) 		.mode =		S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4177) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4178) 	.show = ipr_show_fw_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4179) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4181) static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4182) 				struct bin_attribute *bin_attr, char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4183) 				loff_t off, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4185) 	struct device *cdev = container_of(kobj, struct device, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4186) 	struct Scsi_Host *shost = class_to_shost(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4187) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4188) 	struct ipr_hostrcb *hostrcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4189) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4190) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4192) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4193) 	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4194) 					struct ipr_hostrcb, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4195) 	if (!hostrcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4196) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4197) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4199) 	ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4200) 				sizeof(hostrcb->hcam));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4201) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4202) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4203) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4205) static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4206) 				struct bin_attribute *bin_attr, char *buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4207) 				loff_t off, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4209) 	struct device *cdev = container_of(kobj, struct device, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4210) 	struct Scsi_Host *shost = class_to_shost(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4211) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4212) 	struct ipr_hostrcb *hostrcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4213) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4215) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4216) 	hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4217) 					struct ipr_hostrcb, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4218) 	if (!hostrcb) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4219) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4220) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4221) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4223) 	/* Reclaim hostrcb before exit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4224) 	list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4225) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4226) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4229) static struct bin_attribute ipr_ioa_async_err_log = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4230) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4231) 		.name =		"async_err_log",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4232) 		.mode =		S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4233) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4234) 	.size = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4235) 	.read = ipr_read_async_err_log,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4236) 	.write = ipr_next_async_err_log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4237) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4239) static struct device_attribute *ipr_ioa_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4240) 	&ipr_fw_version_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4241) 	&ipr_log_level_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4242) 	&ipr_diagnostics_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4243) 	&ipr_ioa_state_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4244) 	&ipr_ioa_reset_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4245) 	&ipr_update_fw_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4246) 	&ipr_ioa_fw_type_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4247) 	&ipr_iopoll_weight_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4248) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4249) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4251) #ifdef CONFIG_SCSI_IPR_DUMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4252) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4253)  * ipr_read_dump - Dump the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4254)  * @filp:		open sysfs file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4255)  * @kobj:		kobject struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4256)  * @bin_attr:		bin_attribute struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4257)  * @buf:		buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4258)  * @off:		offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4259)  * @count:		buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4260)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4261)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4262)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4263)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4264) static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4265) 			     struct bin_attribute *bin_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4266) 			     char *buf, loff_t off, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4267) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4268) 	struct device *cdev = container_of(kobj, struct device, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4269) 	struct Scsi_Host *shost = class_to_shost(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4270) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4271) 	struct ipr_dump *dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4272) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4273) 	char *src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4274) 	int len, sdt_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4275) 	size_t rc = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4277) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4278) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4280) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4281) 	dump = ioa_cfg->dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4283) 	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4284) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4285) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4286) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4287) 	kref_get(&dump->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4288) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4290) 	if (off > dump->driver_dump.hdr.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4291) 		kref_put(&dump->kref, ipr_release_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4292) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4295) 	if (off + count > dump->driver_dump.hdr.len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4296) 		count = dump->driver_dump.hdr.len - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4297) 		rc = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4298) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4300) 	if (count && off < sizeof(dump->driver_dump)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4301) 		if (off + count > sizeof(dump->driver_dump))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4302) 			len = sizeof(dump->driver_dump) - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4303) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4304) 			len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4305) 		src = (u8 *)&dump->driver_dump + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4306) 		memcpy(buf, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4307) 		buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4308) 		off += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4309) 		count -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4312) 	off -= sizeof(dump->driver_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4314) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4315) 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4316) 			  (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4317) 			   sizeof(struct ipr_sdt_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4318) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4319) 		sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4320) 			  (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4322) 	if (count && off < sdt_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4323) 		if (off + count > sdt_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4324) 			len = sdt_end - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4325) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4326) 			len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4327) 		src = (u8 *)&dump->ioa_dump + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4328) 		memcpy(buf, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4329) 		buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4330) 		off += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4331) 		count -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4334) 	off -= sdt_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4336) 	while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4337) 		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4338) 			len = PAGE_ALIGN(off) - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4339) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4340) 			len = count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4341) 		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4342) 		src += off & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4343) 		memcpy(buf, src, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4344) 		buf += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4345) 		off += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4346) 		count -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4347) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4349) 	kref_put(&dump->kref, ipr_release_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4350) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4351) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4353) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4354)  * ipr_alloc_dump - Prepare for adapter dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4355)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4356)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4357)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4358)  *	0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4359)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4360) static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4362) 	struct ipr_dump *dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4363) 	__be32 **ioa_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4364) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4366) 	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4368) 	if (!dump) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4369) 		ipr_err("Dump memory allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4370) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4373) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4374) 		ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4375) 					      sizeof(__be32 *)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4376) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4377) 		ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4378) 					      sizeof(__be32 *)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4380) 	if (!ioa_data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4381) 		ipr_err("Dump memory allocation failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4382) 		kfree(dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4383) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4384) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4386) 	dump->ioa_dump.ioa_data = ioa_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4388) 	kref_init(&dump->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4389) 	dump->ioa_cfg = ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4391) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4393) 	if (INACTIVE != ioa_cfg->sdt_state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4394) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4395) 		vfree(dump->ioa_dump.ioa_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4396) 		kfree(dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4397) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4400) 	ioa_cfg->dump = dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4401) 	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4402) 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4403) 		ioa_cfg->dump_taken = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4404) 		schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4405) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4406) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4408) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4409) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4411) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4412)  * ipr_free_dump - Free adapter dump memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4413)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4414)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4415)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4416)  *	0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4417)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4418) static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4420) 	struct ipr_dump *dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4421) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4423) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4424) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4425) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4426) 	dump = ioa_cfg->dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4427) 	if (!dump) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4428) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4429) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4430) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4432) 	ioa_cfg->dump = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4433) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4435) 	kref_put(&dump->kref, ipr_release_dump);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4437) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4438) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4441) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4442)  * ipr_write_dump - Setup dump state of adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4443)  * @filp:		open sysfs file
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4444)  * @kobj:		kobject struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4445)  * @bin_attr:		bin_attribute struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4446)  * @buf:		buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4447)  * @off:		offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4448)  * @count:		buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4449)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4450)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4451)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4452)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4453) static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4454) 			      struct bin_attribute *bin_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4455) 			      char *buf, loff_t off, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4457) 	struct device *cdev = container_of(kobj, struct device, kobj);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4458) 	struct Scsi_Host *shost = class_to_shost(cdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4459) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4460) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4462) 	if (!capable(CAP_SYS_ADMIN))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4463) 		return -EACCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4465) 	if (buf[0] == '1')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4466) 		rc = ipr_alloc_dump(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4467) 	else if (buf[0] == '0')
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4468) 		rc = ipr_free_dump(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4469) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4470) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4472) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4473) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4474) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4475) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4476) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4478) static struct bin_attribute ipr_dump_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4479) 	.attr =	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4480) 		.name = "dump",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4481) 		.mode = S_IRUSR | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4482) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4483) 	.size = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4484) 	.read = ipr_read_dump,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4485) 	.write = ipr_write_dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4486) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4487) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4488) static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4489) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4491) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4492)  * ipr_change_queue_depth - Change the device's queue depth
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4493)  * @sdev:	scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4494)  * @qdepth:	depth to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4495)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4496)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4497)  * 	actual depth set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4498)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4499) static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4500) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4501) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4502) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4503) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4505) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4506) 	res = (struct ipr_resource_entry *)sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4508) 	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4509) 		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4510) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4512) 	scsi_change_queue_depth(sdev, qdepth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4513) 	return sdev->queue_depth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4516) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4517)  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4518)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4519)  * @attr:	device attribute structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4520)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4521)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4522)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4523)  * 	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4524)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4525) static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4526) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4527) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4528) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4529) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4530) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4531) 	ssize_t len = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4533) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4534) 	res = (struct ipr_resource_entry *)sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4535) 	if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4536) 		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4537) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4538) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4541) static struct device_attribute ipr_adapter_handle_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4542) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4543) 		.name = 	"adapter_handle",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4544) 		.mode =		S_IRUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4545) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4546) 	.show = ipr_show_adapter_handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4547) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4549) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4550)  * ipr_show_resource_path - Show the resource path or the resource address for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4551)  *			    this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4552)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4553)  * @attr:	device attribute structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4554)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4555)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4556)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4557)  * 	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4558)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4559) static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4561) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4562) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4563) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4564) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4565) 	ssize_t len = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4566) 	char buffer[IPR_MAX_RES_PATH_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4568) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4569) 	res = (struct ipr_resource_entry *)sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4570) 	if (res && ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4571) 		len = snprintf(buf, PAGE_SIZE, "%s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4572) 			       __ipr_format_res_path(res->res_path, buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4573) 						     sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4574) 	else if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4575) 		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4576) 			       res->bus, res->target, res->lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4578) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4579) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4580) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4582) static struct device_attribute ipr_resource_path_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4583) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4584) 		.name = 	"resource_path",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4585) 		.mode =		S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4586) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4587) 	.show = ipr_show_resource_path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4588) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4590) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4591)  * ipr_show_device_id - Show the device_id for this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4592)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4593)  * @attr:	device attribute structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4594)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4595)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4596)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4597)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4598)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4599) static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4600) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4601) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4602) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4603) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4604) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4605) 	ssize_t len = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4607) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4608) 	res = (struct ipr_resource_entry *)sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4609) 	if (res && ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4610) 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4611) 	else if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4612) 		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4614) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4615) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4616) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4618) static struct device_attribute ipr_device_id_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4619) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4620) 		.name =		"device_id",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4621) 		.mode =		S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4622) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4623) 	.show = ipr_show_device_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4624) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4626) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4627)  * ipr_show_resource_type - Show the resource type for this device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4628)  * @dev:	device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4629)  * @attr:	device attribute structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4630)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4631)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4632)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4633)  *	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4634)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4635) static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4637) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4638) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4639) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4640) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4641) 	ssize_t len = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4643) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4644) 	res = (struct ipr_resource_entry *)sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4646) 	if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4647) 		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4649) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4650) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4651) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4653) static struct device_attribute ipr_resource_type_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4654) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4655) 		.name =		"resource_type",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4656) 		.mode =		S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4657) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4658) 	.show = ipr_show_resource_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4659) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4661) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4662)  * ipr_show_raw_mode - Show the adapter's raw mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4663)  * @dev:	class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4664)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4665)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4666)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4667)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4668)  * 	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4669)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4670) static ssize_t ipr_show_raw_mode(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4671) 				 struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4672) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4673) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4674) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4675) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4676) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4677) 	ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4679) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4680) 	res = (struct ipr_resource_entry *)sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4681) 	if (res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4682) 		len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4683) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4684) 		len = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4685) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4686) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4689) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4690)  * ipr_store_raw_mode - Change the adapter's raw mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4691)  * @dev:	class device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4692)  * @attr:	device attribute (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4693)  * @buf:	buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4694)  * @count:		buffer size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4695)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4696)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4697)  * 	number of bytes printed to buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4698)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4699) static ssize_t ipr_store_raw_mode(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4700) 				  struct device_attribute *attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4701) 				  const char *buf, size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4703) 	struct scsi_device *sdev = to_scsi_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4704) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4705) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4706) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4707) 	ssize_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4709) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4710) 	res = (struct ipr_resource_entry *)sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4711) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4712) 		if (ipr_is_af_dasd_device(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4713) 			res->raw_mode = simple_strtoul(buf, NULL, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4714) 			len = strlen(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4715) 			if (res->sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4716) 				sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4717) 					res->raw_mode ? "enabled" : "disabled");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4718) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4719) 			len = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4720) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4721) 		len = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4722) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4723) 	return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4726) static struct device_attribute ipr_raw_mode_attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4727) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4728) 		.name =		"raw_mode",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4729) 		.mode =		S_IRUGO | S_IWUSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4730) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4731) 	.show = ipr_show_raw_mode,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4732) 	.store = ipr_store_raw_mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4733) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4735) static struct device_attribute *ipr_dev_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4736) 	&ipr_adapter_handle_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4737) 	&ipr_resource_path_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4738) 	&ipr_device_id_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4739) 	&ipr_resource_type_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4740) 	&ipr_raw_mode_attr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4741) 	NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4742) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4744) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4745)  * ipr_biosparam - Return the HSC mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4746)  * @sdev:			scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4747)  * @block_device:	block device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4748)  * @capacity:		capacity of the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4749)  * @parm:			Array containing returned HSC values.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4750)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4751)  * This function generates the HSC parms that fdisk uses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4752)  * We want to make sure we return something that places partitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4753)  * on 4k boundaries for best performance with the IOA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4754)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4755)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4756)  * 	0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4757)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4758) static int ipr_biosparam(struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4759) 			 struct block_device *block_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4760) 			 sector_t capacity, int *parm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4762) 	int heads, sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4763) 	sector_t cylinders;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4765) 	heads = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4766) 	sectors = 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4768) 	cylinders = capacity;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4769) 	sector_div(cylinders, (128 * 32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4771) 	/* return result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4772) 	parm[0] = heads;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4773) 	parm[1] = sectors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4774) 	parm[2] = cylinders;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4776) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4777) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4779) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4780)  * ipr_find_starget - Find target based on bus/target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4781)  * @starget:	scsi target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4782)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4783)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4784)  * 	resource entry pointer if found / NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4785)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4786) static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4788) 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4789) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4790) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4792) 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4793) 		if ((res->bus == starget->channel) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4794) 		    (res->target == starget->id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4795) 			return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4796) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4797) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4799) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4802) static struct ata_port_info sata_port_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4804) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4805)  * ipr_target_alloc - Prepare for commands to a SCSI target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4806)  * @starget:	scsi target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4807)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4808)  * If the device is a SATA device, this function allocates an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4809)  * ATA port with libata, else it does nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4811)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4812)  * 	0 on success / non-0 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4813)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4814) static int ipr_target_alloc(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4816) 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4817) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4818) 	struct ipr_sata_port *sata_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4819) 	struct ata_port *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4820) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4821) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4823) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4824) 	res = ipr_find_starget(starget);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4825) 	starget->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4827) 	if (res && ipr_is_gata(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4828) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4829) 		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4830) 		if (!sata_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4831) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4833) 		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4834) 		if (ap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4835) 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4836) 			sata_port->ioa_cfg = ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4837) 			sata_port->ap = ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4838) 			sata_port->res = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4840) 			res->sata_port = sata_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4841) 			ap->private_data = sata_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4842) 			starget->hostdata = sata_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4843) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4844) 			kfree(sata_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4845) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4846) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4848) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4850) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4853) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4854)  * ipr_target_destroy - Destroy a SCSI target
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4855)  * @starget:	scsi target struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4856)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4857)  * If the device was a SATA device, this function frees the libata
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4858)  * ATA port, else it does nothing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4859)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4860)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4861) static void ipr_target_destroy(struct scsi_target *starget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4863) 	struct ipr_sata_port *sata_port = starget->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4864) 	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4865) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4867) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4868) 		if (!ipr_find_starget(starget)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4869) 			if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4870) 				clear_bit(starget->id, ioa_cfg->array_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4871) 			else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4872) 				clear_bit(starget->id, ioa_cfg->vset_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4873) 			else if (starget->channel == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4874) 				clear_bit(starget->id, ioa_cfg->target_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4875) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4876) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4878) 	if (sata_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4879) 		starget->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4880) 		ata_sas_port_destroy(sata_port->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4881) 		kfree(sata_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4885) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4886)  * ipr_find_sdev - Find device based on bus/target/lun.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4887)  * @sdev:	scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4888)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4889)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4890)  * 	resource entry pointer if found / NULL if not found
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4891)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4892) static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4894) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4895) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4897) 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4898) 		if ((res->bus == sdev->channel) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4899) 		    (res->target == sdev->id) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4900) 		    (res->lun == sdev->lun))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4901) 			return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4902) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4904) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4905) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4907) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4908)  * ipr_slave_destroy - Unconfigure a SCSI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4909)  * @sdev:	scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4910)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4911)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4912)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4913)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4914) static void ipr_slave_destroy(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4916) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4917) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4918) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4920) 	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4922) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4923) 	res = (struct ipr_resource_entry *) sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4924) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4925) 		if (res->sata_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4926) 			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4927) 		sdev->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4928) 		res->sdev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4929) 		res->sata_port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4930) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4931) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4932) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4934) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4935)  * ipr_slave_configure - Configure a SCSI device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4936)  * @sdev:	scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4937)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4938)  * This function configures the specified scsi device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4939)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4940)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4941)  * 	0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4942)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4943) static int ipr_slave_configure(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4945) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4946) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4947) 	struct ata_port *ap = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4948) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4949) 	char buffer[IPR_MAX_RES_PATH_LENGTH];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4951) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4952) 	res = sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4953) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4954) 		if (ipr_is_af_dasd_device(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4955) 			sdev->type = TYPE_RAID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4956) 		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4957) 			sdev->scsi_level = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4958) 			sdev->no_uld_attach = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4959) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4960) 		if (ipr_is_vset_device(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4961) 			sdev->scsi_level = SCSI_SPC_3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4962) 			sdev->no_report_opcodes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4963) 			blk_queue_rq_timeout(sdev->request_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4964) 					     IPR_VSET_RW_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4965) 			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4966) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4967) 		if (ipr_is_gata(res) && res->sata_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4968) 			ap = res->sata_port->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4969) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4971) 		if (ap) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4972) 			scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4973) 			ata_sas_slave_configure(sdev, ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4974) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4976) 		if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4977) 			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4978) 				    ipr_format_res_path(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4979) 				res->res_path, buffer, sizeof(buffer)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4980) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4982) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4983) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4984) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4986) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4987)  * ipr_ata_slave_alloc - Prepare for commands to a SATA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4988)  * @sdev:	scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4989)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4990)  * This function initializes an ATA port so that future commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4991)  * sent through queuecommand will work.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4992)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4993)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4994)  * 	0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4995)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4996) static int ipr_ata_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4998) 	struct ipr_sata_port *sata_port = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  4999) 	int rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5001) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5002) 	if (sdev->sdev_target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5003) 		sata_port = sdev->sdev_target->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5004) 	if (sata_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5005) 		rc = ata_sas_port_init(sata_port->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5006) 		if (rc == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5007) 			rc = ata_sas_sync_probe(sata_port->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5010) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5011) 		ipr_slave_destroy(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5013) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5014) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5015) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5017) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5018)  * ipr_slave_alloc - Prepare for commands to a device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5019)  * @sdev:	scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5020)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5021)  * This function saves a pointer to the resource entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5022)  * in the scsi device struct if the device exists. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5023)  * can then use this pointer in ipr_queuecommand when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5024)  * handling new commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5025)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5026)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5027)  * 	0 on success / -ENXIO if device does not exist
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5028)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5029) static int ipr_slave_alloc(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5030) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5031) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5032) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5033) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5034) 	int rc = -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5036) 	sdev->hostdata = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5038) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5040) 	res = ipr_find_sdev(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5041) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5042) 		res->sdev = sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5043) 		res->add_to_ml = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5044) 		res->in_erp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5045) 		sdev->hostdata = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5046) 		if (!ipr_is_naca_model(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5047) 			res->needs_sync_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5048) 		rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5049) 		if (ipr_is_gata(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5050) 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5051) 			return ipr_ata_slave_alloc(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5052) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5055) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5057) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5058) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5060) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5061)  * ipr_match_lun - Match function for specified LUN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5062)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5063)  * @device:		device to match (sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5064)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5065)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5066)  *	1 if command matches sdev / 0 if command does not match sdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5067)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5068) static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5069) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5070) 	if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5071) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5072) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5075) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5076)  * ipr_cmnd_is_free - Check if a command is free or not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5077)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5078)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5079)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5080)  *	true / false
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5081)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5082) static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5083) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5084) 	struct ipr_cmnd *loop_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5086) 	list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5087) 		if (loop_cmd == ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5088) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5089) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5091) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5094) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5095)  * ipr_match_res - Match function for specified resource entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5096)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5097)  * @resource:	resource entry to match
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5098)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5099)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5100)  *	1 if command matches sdev / 0 if command does not match sdev
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5101)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5102) static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5104) 	struct ipr_resource_entry *res = resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5106) 	if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5107) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5108) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5111) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5112)  * ipr_wait_for_ops - Wait for matching commands to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5113)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5114)  * @device:		device to match (sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5115)  * @match:		match function to use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5116)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5117)  * Returns:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5118)  *	SUCCESS / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5119)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5120) static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5121) 			    int (*match)(struct ipr_cmnd *, void *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5123) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5124) 	int wait, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5125) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5126) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5127) 	signed long timeout = IPR_ABORT_TASK_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5128) 	DECLARE_COMPLETION_ONSTACK(comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5130) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5131) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5132) 		wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5134) 		for_each_hrrq(hrrq, ioa_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5135) 			spin_lock_irqsave(hrrq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5136) 			for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5137) 				ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5138) 				if (!ipr_cmnd_is_free(ipr_cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5139) 					if (match(ipr_cmd, device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5140) 						ipr_cmd->eh_comp = &comp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5141) 						wait++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5142) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5143) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5144) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5145) 			spin_unlock_irqrestore(hrrq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5146) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5148) 		if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5149) 			timeout = wait_for_completion_timeout(&comp, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5151) 			if (!timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5152) 				wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5154) 				for_each_hrrq(hrrq, ioa_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5155) 					spin_lock_irqsave(hrrq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5156) 					for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5157) 						ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5158) 						if (!ipr_cmnd_is_free(ipr_cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5159) 							if (match(ipr_cmd, device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5160) 								ipr_cmd->eh_comp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5161) 								wait++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5162) 							}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5163) 						}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5164) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5165) 					spin_unlock_irqrestore(hrrq->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5166) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5168) 				if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5169) 					dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5170) 				LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5171) 				return wait ? FAILED : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5172) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5173) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5174) 	} while (wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5176) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5177) 	return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5178) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5180) static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5181) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5182) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5183) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5184) 	int rc = SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5186) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5187) 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5188) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5190) 	if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5191) 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5192) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5193) 			"Adapter being reset as a result of error recovery.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5195) 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5196) 			ioa_cfg->sdt_state = GET_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5199) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5200) 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5201) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5203) 	/* If we got hit with a host reset while we were already resetting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5204) 	 the adapter for some reason, and the reset failed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5205) 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5206) 		ipr_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5207) 		rc = FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5210) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5211) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5212) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5215) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5216)  * ipr_device_reset - Reset the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5217)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5218)  * @res:		resource entry struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5219)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5220)  * This function issues a device reset to the affected device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5221)  * If the device is a SCSI device, a LUN reset will be sent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5222)  * to the device first. If that does not work, a target reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5223)  * will be sent. If the device is a SATA device, a PHY reset will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5224)  * be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5225)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5226)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5227)  *	0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5228)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5229) static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5230) 			    struct ipr_resource_entry *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5232) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5233) 	struct ipr_ioarcb *ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5234) 	struct ipr_cmd_pkt *cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5235) 	struct ipr_ioarcb_ata_regs *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5236) 	u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5238) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5239) 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5240) 	ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5241) 	cmd_pkt = &ioarcb->cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5243) 	if (ipr_cmd->ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5244) 		regs = &ipr_cmd->i.ata_ioadl.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5245) 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5246) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5247) 		regs = &ioarcb->u.add_data.u.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5249) 	ioarcb->res_handle = res->res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5250) 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5251) 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5252) 	if (ipr_is_gata(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5253) 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5254) 		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5255) 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5256) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5258) 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5259) 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5260) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5261) 	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5262) 		if (ipr_cmd->ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5263) 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5264) 			       sizeof(struct ipr_ioasa_gata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5265) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5266) 			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5267) 			       sizeof(struct ipr_ioasa_gata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5268) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5270) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5271) 	return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5274) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5275)  * ipr_sata_reset - Reset the SATA port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5276)  * @link:	SATA link to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5277)  * @classes:	class of the attached device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5278)  * @deadline:	unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5279)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5280)  * This function issues a SATA phy reset to the affected ATA link.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5281)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5282)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5283)  *	0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5284)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5285) static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5286) 				unsigned long deadline)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5288) 	struct ipr_sata_port *sata_port = link->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5289) 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5290) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5291) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5292) 	int rc = -ENXIO, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5294) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5295) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5296) 	while (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5297) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5298) 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5299) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5302) 	res = sata_port->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5303) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5304) 		rc = ipr_device_reset(ioa_cfg, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5305) 		*classes = res->ata_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5306) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5308) 		ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5309) 		if (ret != SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5310) 			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5311) 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5312) 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5314) 			wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5315) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5316) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5317) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5319) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5320) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5323) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5324)  * ipr_eh_dev_reset - Reset the device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5325)  * @scsi_cmd:	scsi command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5326)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5327)  * This function issues a device reset to the affected device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5328)  * A LUN reset will be sent to the device first. If that does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5329)  * not work, a target reset will be sent.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5330)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5331)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5332)  *	SUCCESS / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5333)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5334) static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5336) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5337) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5338) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5339) 	struct ata_port *ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5340) 	int rc = 0, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5341) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5343) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5344) 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5345) 	res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5347) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5348) 	 * If we are currently going through reset/reload, return failed. This will force the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5349) 	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5350) 	 * reset to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5351) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5352) 	if (ioa_cfg->in_reset_reload)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5353) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5354) 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5355) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5357) 	for_each_hrrq(hrrq, ioa_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5358) 		spin_lock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5359) 		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5360) 			ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5362) 			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5363) 				if (!ipr_cmd->qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5364) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5365) 				if (ipr_cmnd_is_free(ipr_cmd))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5366) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5368) 				ipr_cmd->done = ipr_sata_eh_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5369) 				if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5370) 					ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5371) 					ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5372) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5373) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5374) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5375) 		spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5377) 	res->resetting_device = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5378) 	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5380) 	if (ipr_is_gata(res) && res->sata_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5381) 		ap = res->sata_port->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5382) 		spin_unlock_irq(scsi_cmd->device->host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5383) 		ata_std_error_handler(ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5384) 		spin_lock_irq(scsi_cmd->device->host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5385) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5386) 		rc = ipr_device_reset(ioa_cfg, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5387) 	res->resetting_device = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5388) 	res->reset_occurred = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5390) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5391) 	return rc ? FAILED : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5394) static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5395) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5396) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5397) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5398) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5400) 	ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5401) 	res = cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5403) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5404) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5406) 	spin_lock_irq(cmd->device->host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5407) 	rc = __ipr_eh_dev_reset(cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5408) 	spin_unlock_irq(cmd->device->host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5410) 	if (rc == SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5411) 		if (ipr_is_gata(res) && res->sata_port)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5412) 			rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5413) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5414) 			rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5415) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5417) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5418) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5420) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5421)  * ipr_bus_reset_done - Op done function for bus reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5422)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5423)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5424)  * This function is the op done function for a bus reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5425)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5426)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5427)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5428)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5429) static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5430) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5431) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5432) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5434) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5435) 	if (!ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5436) 		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5437) 			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5438) 				scsi_report_bus_reset(ioa_cfg->host, res->bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5439) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5440) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5441) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5443) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5444) 	 * If abort has not completed, indicate the reset has, else call the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5445) 	 * abort's done function to wake the sleeping eh thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5446) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5447) 	if (ipr_cmd->sibling->sibling)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5448) 		ipr_cmd->sibling->sibling = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5449) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5450) 		ipr_cmd->sibling->done(ipr_cmd->sibling);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5452) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5453) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5454) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5456) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5457)  * ipr_abort_timeout - An abort task has timed out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5458)  * @t: Timer context used to fetch ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5459)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5460)  * This function handles when an abort task times out. If this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5461)  * happens we issue a bus reset since we have resources tied
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5462)  * up that must be freed before returning to the midlayer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5463)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5464)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5465)  *	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5466)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5467) static void ipr_abort_timeout(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5469) 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5470) 	struct ipr_cmnd *reset_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5471) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5472) 	struct ipr_cmd_pkt *cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5473) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5475) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5476) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5477) 	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5478) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5479) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5480) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5482) 	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5483) 	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5484) 	ipr_cmd->sibling = reset_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5485) 	reset_cmd->sibling = ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5486) 	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5487) 	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5488) 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5489) 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5490) 	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5492) 	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5493) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5494) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5497) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5498)  * ipr_cancel_op - Cancel specified op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5499)  * @scsi_cmd:	scsi command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5500)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5501)  * This function cancels specified op.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5502)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5503)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5504)  *	SUCCESS / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5505)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5506) static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5508) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5509) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5510) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5511) 	struct ipr_cmd_pkt *cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5512) 	u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5513) 	int i, op_found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5514) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5516) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5517) 	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5518) 	res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5520) 	/* If we are currently going through reset/reload, return failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5521) 	 * This will force the mid-layer to call ipr_eh_host_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5522) 	 * which will then go to sleep and wait for the reset to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5523) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5524) 	if (ioa_cfg->in_reset_reload ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5525) 	    ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5526) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5527) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5528) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5530) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5531) 	 * If we are aborting a timed out op, chances are that the timeout was caused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5532) 	 * by a still not detected EEH error. In such cases, reading a register will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5533) 	 * trigger the EEH recovery infrastructure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5534) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5535) 	readl(ioa_cfg->regs.sense_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5536) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5537) 	if (!ipr_is_gscsi(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5538) 		return FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5540) 	for_each_hrrq(hrrq, ioa_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5541) 		spin_lock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5542) 		for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5543) 			if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5544) 				if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5545) 					op_found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5546) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5547) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5548) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5549) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5550) 		spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5553) 	if (!op_found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5554) 		return SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5556) 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5557) 	ipr_cmd->ioarcb.res_handle = res->res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5558) 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5559) 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5560) 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5561) 	ipr_cmd->u.sdev = scsi_cmd->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5563) 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5564) 		    scsi_cmd->cmnd[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5565) 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5566) 	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5568) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5569) 	 * If the abort task timed out and we sent a bus reset, we will get
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5570) 	 * one the following responses to the abort
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5571) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5572) 	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5573) 		ioasc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5574) 		ipr_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5577) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5578) 	if (!ipr_is_naca_model(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5579) 		res->needs_sync_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5581) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5582) 	return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5585) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5586)  * ipr_eh_abort - Abort a single op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5587)  * @shost:           scsi host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5588)  * @elapsed_time:    elapsed time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5589)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5590)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5591)  *	0 if scan in progress / 1 if scan is complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5592)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5593) static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5595) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5596) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5597) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5599) 	spin_lock_irqsave(shost->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5600) 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5601) 		rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5602) 	if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5603) 		rc = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5604) 	spin_unlock_irqrestore(shost->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5605) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5608) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5609)  * ipr_eh_host_reset - Reset the host adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5610)  * @scsi_cmd:	scsi command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5611)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5612)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5613)  * 	SUCCESS / FAILED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5614)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5615) static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5617) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5618) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5619) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5621) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5623) 	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5625) 	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5626) 	rc = ipr_cancel_op(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5627) 	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5629) 	if (rc == SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5630) 		rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5631) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5632) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5633) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5635) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5636)  * ipr_handle_other_interrupt - Handle "other" interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5637)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5638)  * @int_reg:	interrupt register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5639)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5640)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5641)  * 	IRQ_NONE / IRQ_HANDLED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5642)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5643) static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5644) 					      u32 int_reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5646) 	irqreturn_t rc = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5647) 	u32 int_mask_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5648) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5649) 	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5650) 	int_reg &= ~int_mask_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5652) 	/* If an interrupt on the adapter did not occur, ignore it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5653) 	 * Or in the case of SIS 64, check for a stage change interrupt.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5654) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5655) 	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5656) 		if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5657) 			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5658) 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5659) 			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5661) 				/* clear stage change */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5662) 				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5663) 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5664) 				list_del(&ioa_cfg->reset_cmd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5665) 				del_timer(&ioa_cfg->reset_cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5666) 				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5667) 				return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5668) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5669) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5671) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5672) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5674) 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5675) 		/* Mask the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5676) 		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5677) 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5679) 		list_del(&ioa_cfg->reset_cmd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5680) 		del_timer(&ioa_cfg->reset_cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5681) 		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5682) 	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5683) 		if (ioa_cfg->clear_isr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5684) 			if (ipr_debug && printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5685) 				dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5686) 					"Spurious interrupt detected. 0x%08X\n", int_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5687) 			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5688) 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5689) 			return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5690) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5691) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5692) 		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5693) 			ioa_cfg->ioa_unit_checked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5694) 		else if (int_reg & IPR_PCII_NO_HOST_RRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5695) 			dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5696) 				"No Host RRQ. 0x%08X\n", int_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5697) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5698) 			dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5699) 				"Permanent IOA failure. 0x%08X\n", int_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5701) 		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5702) 			ioa_cfg->sdt_state = GET_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5704) 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5705) 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5706) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5708) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5709) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5711) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5712)  * ipr_isr_eh - Interrupt service routine error handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5713)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5714)  * @msg:	message to log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5715)  * @number:	various meanings depending on the caller/message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5716)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5717)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5718)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5719)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5720) static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5721) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5722) 	ioa_cfg->errors_logged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5723) 	dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5725) 	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5726) 		ioa_cfg->sdt_state = GET_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5728) 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5731) static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5732) 						struct list_head *doneq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5733) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5734) 	u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5735) 	u16 cmd_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5736) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5737) 	struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5738) 	int num_hrrq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5740) 	/* If interrupts are disabled, ignore the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5741) 	if (!hrr_queue->allow_interrupts)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5742) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5744) 	while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5745) 	       hrr_queue->toggle_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5747) 		cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5748) 			     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5749) 			     IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5751) 		if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5752) 			     cmd_index < hrr_queue->min_cmd_id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5753) 			ipr_isr_eh(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5754) 				"Invalid response handle from IOA: ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5755) 				cmd_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5756) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5757) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5759) 		ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5760) 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5762) 		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5764) 		list_move_tail(&ipr_cmd->queue, doneq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5766) 		if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5767) 			hrr_queue->hrrq_curr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5768) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5769) 			hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5770) 			hrr_queue->toggle_bit ^= 1u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5771) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5772) 		num_hrrq++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5773) 		if (budget > 0 && num_hrrq >= budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5774) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5777) 	return num_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5778) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5780) static int ipr_iopoll(struct irq_poll *iop, int budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5781) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5782) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5783) 	struct ipr_cmnd *ipr_cmd, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5784) 	unsigned long hrrq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5785) 	int completed_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5786) 	LIST_HEAD(doneq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5788) 	hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5790) 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5791) 	completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5793) 	if (completed_ops < budget)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5794) 		irq_poll_complete(iop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5795) 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5797) 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5798) 		list_del(&ipr_cmd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5799) 		del_timer(&ipr_cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5800) 		ipr_cmd->fast_done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5801) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5803) 	return completed_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5804) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5806) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5807)  * ipr_isr - Interrupt service routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5808)  * @irq:	irq number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5809)  * @devp:	pointer to ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5811)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5812)  * 	IRQ_NONE / IRQ_HANDLED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5813)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5814) static irqreturn_t ipr_isr(int irq, void *devp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5816) 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5817) 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5818) 	unsigned long hrrq_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5819) 	u32 int_reg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5820) 	int num_hrrq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5821) 	int irq_none = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5822) 	struct ipr_cmnd *ipr_cmd, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5823) 	irqreturn_t rc = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5824) 	LIST_HEAD(doneq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5826) 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5827) 	/* If interrupts are disabled, ignore the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5828) 	if (!hrrq->allow_interrupts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5829) 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5830) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5833) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5834) 		if (ipr_process_hrrq(hrrq, -1, &doneq)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5835) 			rc =  IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5837) 			if (!ioa_cfg->clear_isr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5838) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5840) 			/* Clear the PCI interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5841) 			num_hrrq = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5842) 			do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5843) 				writel(IPR_PCII_HRRQ_UPDATED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5844) 				     ioa_cfg->regs.clr_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5845) 				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5846) 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5847) 				num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5849) 		} else if (rc == IRQ_NONE && irq_none == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5850) 			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5851) 			irq_none++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5852) 		} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5853) 			   int_reg & IPR_PCII_HRRQ_UPDATED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5854) 			ipr_isr_eh(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5855) 				"Error clearing HRRQ: ", num_hrrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5856) 			rc = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5857) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5858) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5859) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5860) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5862) 	if (unlikely(rc == IRQ_NONE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5863) 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5865) 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5866) 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5867) 		list_del(&ipr_cmd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5868) 		del_timer(&ipr_cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5869) 		ipr_cmd->fast_done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5871) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5874) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5875)  * ipr_isr_mhrrq - Interrupt service routine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5876)  * @irq:	irq number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5877)  * @devp:	pointer to ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5878)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5879)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5880)  *	IRQ_NONE / IRQ_HANDLED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5881)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5882) static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5883) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5884) 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5885) 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5886) 	unsigned long hrrq_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5887) 	struct ipr_cmnd *ipr_cmd, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5888) 	irqreturn_t rc = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5889) 	LIST_HEAD(doneq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5891) 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5893) 	/* If interrupts are disabled, ignore the interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5894) 	if (!hrrq->allow_interrupts) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5895) 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5896) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5899) 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5900) 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5901) 		       hrrq->toggle_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5902) 			irq_poll_sched(&hrrq->iopoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5903) 			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5904) 			return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5905) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5906) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5907) 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5908) 			hrrq->toggle_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5910) 			if (ipr_process_hrrq(hrrq, -1, &doneq))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5911) 				rc =  IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5912) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5914) 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5916) 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5917) 		list_del(&ipr_cmd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5918) 		del_timer(&ipr_cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5919) 		ipr_cmd->fast_done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5921) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5922) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5924) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5925)  * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5926)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5927)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5928)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5929)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5930)  * 	0 on success / -1 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5931)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5932) static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5933) 			     struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5935) 	int i, nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5936) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5937) 	u32 length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5938) 	u32 ioadl_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5939) 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5940) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5941) 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5943) 	length = scsi_bufflen(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5944) 	if (!length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5945) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5947) 	nseg = scsi_dma_map(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5948) 	if (nseg < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5949) 		if (printk_ratelimit())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5950) 			dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5951) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5952) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5954) 	ipr_cmd->dma_use_sg = nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5956) 	ioarcb->data_transfer_length = cpu_to_be32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5957) 	ioarcb->ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5958) 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5960) 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5961) 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5962) 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5963) 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5964) 		ioadl_flags = IPR_IOADL_FLAGS_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5966) 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5967) 		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5968) 		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5969) 		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5970) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5972) 	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5973) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5976) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5977)  * ipr_build_ioadl - Build a scatter/gather list and map the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5978)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5979)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5980)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5981)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5982)  * 	0 on success / -1 on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5983)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5984) static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5985) 			   struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5986) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5987) 	int i, nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5988) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5989) 	u32 length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5990) 	u32 ioadl_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5991) 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5992) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5993) 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5995) 	length = scsi_bufflen(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5996) 	if (!length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5997) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  5999) 	nseg = scsi_dma_map(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6000) 	if (nseg < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6001) 		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6002) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6003) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6005) 	ipr_cmd->dma_use_sg = nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6007) 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6008) 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6009) 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6010) 		ioarcb->data_transfer_length = cpu_to_be32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6011) 		ioarcb->ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6012) 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6013) 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6014) 		ioadl_flags = IPR_IOADL_FLAGS_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6015) 		ioarcb->read_data_transfer_length = cpu_to_be32(length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6016) 		ioarcb->read_ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6017) 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6018) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6020) 	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6021) 		ioadl = ioarcb->u.add_data.u.ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6022) 		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6023) 				    offsetof(struct ipr_ioarcb, u.add_data));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6024) 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6027) 	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6028) 		ioadl[i].flags_and_data_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6029) 			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6030) 		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6031) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6033) 	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6034) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6035) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6037) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6038)  * __ipr_erp_done - Process completion of ERP for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6039)  * @ipr_cmd:		ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6040)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6041)  * This function copies the sense buffer into the scsi_cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6042)  * struct and pushes the scsi_done function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6043)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6044)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6045)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6046)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6047) static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6048) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6049) 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6050) 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6051) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6053) 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6054) 		scsi_cmd->result |= (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6055) 		scmd_printk(KERN_ERR, scsi_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6056) 			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6057) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6058) 		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6059) 		       SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6062) 	if (res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6063) 		if (!ipr_is_naca_model(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6064) 			res->needs_sync_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6065) 		res->in_erp = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6066) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6067) 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6068) 	scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6069) 	if (ipr_cmd->eh_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6070) 		complete(ipr_cmd->eh_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6071) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6074) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6075)  * ipr_erp_done - Process completion of ERP for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6076)  * @ipr_cmd:		ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6077)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6078)  * This function copies the sense buffer into the scsi_cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6079)  * struct and pushes the scsi_done function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6080)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6081)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6082)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6083)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6084) static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6085) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6086) 	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6087) 	unsigned long hrrq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6089) 	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6090) 	__ipr_erp_done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6091) 	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6094) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6095)  * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6096)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6097)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6098)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6099)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6100)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6101) static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6102) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6103) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6104) 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6105) 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6107) 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6108) 	ioarcb->data_transfer_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6109) 	ioarcb->read_data_transfer_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6110) 	ioarcb->ioadl_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6111) 	ioarcb->read_ioadl_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6112) 	ioasa->hdr.ioasc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6113) 	ioasa->hdr.residual_data_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6115) 	if (ipr_cmd->ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6116) 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6117) 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6118) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6119) 		ioarcb->write_ioadl_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6120) 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6121) 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6122) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6125) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6126)  * __ipr_erp_request_sense - Send request sense to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6127)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6128)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6129)  * This function sends a request sense to a device as a result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6130)  * of a check condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6131)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6132)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6133)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6134)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6135) static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6136) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6137) 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6138) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6140) 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6141) 		__ipr_erp_done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6142) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6145) 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6147) 	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6148) 	cmd_pkt->cdb[0] = REQUEST_SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6149) 	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6150) 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6151) 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6152) 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6154) 	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6155) 		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6157) 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6158) 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6161) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6162)  * ipr_erp_request_sense - Send request sense to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6163)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6164)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6165)  * This function sends a request sense to a device as a result
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6166)  * of a check condition.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6167)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6168)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6169)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6170)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6171) static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6172) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6173) 	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6174) 	unsigned long hrrq_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6176) 	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6177) 	__ipr_erp_request_sense(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6178) 	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6179) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6181) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6182)  * ipr_erp_cancel_all - Send cancel all to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6183)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6184)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6185)  * This function sends a cancel all to a device to clear the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6186)  * queue. If we are running TCQ on the device, QERR is set to 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6187)  * which means all outstanding ops have been dropped on the floor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6188)  * Cancel all will return them to us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6189)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6190)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6191)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6192)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6193) static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6194) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6195) 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6196) 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6197) 	struct ipr_cmd_pkt *cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6199) 	res->in_erp = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6201) 	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6203) 	if (!scsi_cmd->device->simple_tags) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6204) 		__ipr_erp_request_sense(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6205) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6208) 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6209) 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6210) 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6212) 	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6213) 		   IPR_CANCEL_ALL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6214) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6216) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6217)  * ipr_dump_ioasa - Dump contents of IOASA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6218)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6219)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6220)  * @res:		resource entry struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6221)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6222)  * This function is invoked by the interrupt handler when ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6223)  * fail. It will log the IOASA if appropriate. Only called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6224)  * for GPDD ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6225)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6226)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6227)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6228)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6229) static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6230) 			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6232) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6233) 	u16 data_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6234) 	u32 ioasc, fd_ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6235) 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6236) 	__be32 *ioasa_data = (__be32 *)ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6237) 	int error_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6239) 	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6240) 	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6242) 	if (0 == ioasc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6243) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6245) 	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6246) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6248) 	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6249) 		error_index = ipr_get_error(fd_ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6250) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6251) 		error_index = ipr_get_error(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6253) 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6254) 		/* Don't log an error if the IOA already logged one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6255) 		if (ioasa->hdr.ilid != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6256) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6258) 		if (!ipr_is_gscsi(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6259) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6261) 		if (ipr_error_table[error_index].log_ioasa == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6262) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6263) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6265) 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6267) 	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6268) 	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6269) 		data_len = sizeof(struct ipr_ioasa64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6270) 	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6271) 		data_len = sizeof(struct ipr_ioasa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6273) 	ipr_err("IOASA Dump:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6275) 	for (i = 0; i < data_len / 4; i += 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6276) 		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6277) 			be32_to_cpu(ioasa_data[i]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6278) 			be32_to_cpu(ioasa_data[i+1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6279) 			be32_to_cpu(ioasa_data[i+2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6280) 			be32_to_cpu(ioasa_data[i+3]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6284) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6285)  * ipr_gen_sense - Generate SCSI sense data from an IOASA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6286)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6287)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6288)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6289)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6290)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6291) static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6292) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6293) 	u32 failing_lba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6294) 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6295) 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6296) 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6297) 	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6299) 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6301) 	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6302) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6304) 	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6306) 	if (ipr_is_vset_device(res) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6307) 	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6308) 	    ioasa->u.vset.failing_lba_hi != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6309) 		sense_buf[0] = 0x72;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6310) 		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6311) 		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6312) 		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6314) 		sense_buf[7] = 12;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6315) 		sense_buf[8] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6316) 		sense_buf[9] = 0x0A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6317) 		sense_buf[10] = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6319) 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6321) 		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6322) 		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6323) 		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6324) 		sense_buf[15] = failing_lba & 0x000000ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6326) 		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6328) 		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6329) 		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6330) 		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6331) 		sense_buf[19] = failing_lba & 0x000000ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6332) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6333) 		sense_buf[0] = 0x70;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6334) 		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6335) 		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6336) 		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6338) 		/* Illegal request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6339) 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6340) 		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6341) 			sense_buf[7] = 10;	/* additional length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6343) 			/* IOARCB was in error */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6344) 			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6345) 				sense_buf[15] = 0xC0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6346) 			else	/* Parameter data was invalid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6347) 				sense_buf[15] = 0x80;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6349) 			sense_buf[16] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6350) 			    ((IPR_FIELD_POINTER_MASK &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6351) 			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6352) 			sense_buf[17] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6353) 			    (IPR_FIELD_POINTER_MASK &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6354) 			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6355) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6356) 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6357) 				if (ipr_is_vset_device(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6358) 					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6359) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6360) 					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6362) 				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6363) 				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6364) 				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6365) 				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6366) 				sense_buf[6] = failing_lba & 0x000000ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6367) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6369) 			sense_buf[7] = 6;	/* additional length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6370) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6371) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6372) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6373) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6374) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6375)  * ipr_get_autosense - Copy autosense data to sense buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6376)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6377)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6378)  * This function copies the autosense buffer to the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6379)  * in the scsi_cmd, if there is autosense available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6380)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6381)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6382)  *	1 if autosense was available / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6383)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6384) static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6386) 	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6387) 	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6389) 	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6390) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6392) 	if (ipr_cmd->ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6393) 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6394) 		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6395) 			   SCSI_SENSE_BUFFERSIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6396) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6397) 		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6398) 		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6399) 			   SCSI_SENSE_BUFFERSIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6400) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6403) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6404)  * ipr_erp_start - Process an error response for a SCSI op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6405)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6406)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6407)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6408)  * This function determines whether or not to initiate ERP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6409)  * on the affected device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6410)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6411)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6412)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6413)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6414) static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6415) 			      struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6417) 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6418) 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6419) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6420) 	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6422) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6423) 		__ipr_scsi_eh_done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6424) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6427) 	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6428) 		ipr_gen_sense(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6430) 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6432) 	switch (masked_ioasc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6433) 	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6434) 		if (ipr_is_naca_model(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6435) 			scsi_cmd->result |= (DID_ABORT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6436) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6437) 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6438) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6439) 	case IPR_IOASC_IR_RESOURCE_HANDLE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6440) 	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6441) 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6442) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6443) 	case IPR_IOASC_HW_SEL_TIMEOUT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6444) 		scsi_cmd->result |= (DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6445) 		if (!ipr_is_naca_model(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6446) 			res->needs_sync_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6447) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6448) 	case IPR_IOASC_SYNC_REQUIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6449) 		if (!res->in_erp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6450) 			res->needs_sync_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6451) 		scsi_cmd->result |= (DID_IMM_RETRY << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6452) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6453) 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6454) 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6455) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6456) 		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6457) 		 * so SCSI mid-layer and upper layers handle it accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6458) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6459) 		if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6460) 			scsi_cmd->result |= (DID_PASSTHROUGH << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6461) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6462) 	case IPR_IOASC_BUS_WAS_RESET:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6463) 	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6464) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6465) 		 * Report the bus reset and ask for a retry. The device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6466) 		 * will give CC/UA the next command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6467) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6468) 		if (!res->resetting_device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6469) 			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6470) 		scsi_cmd->result |= (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6471) 		if (!ipr_is_naca_model(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6472) 			res->needs_sync_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6473) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6474) 	case IPR_IOASC_HW_DEV_BUS_STATUS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6475) 		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6476) 		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6477) 			if (!ipr_get_autosense(ipr_cmd)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6478) 				if (!ipr_is_naca_model(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6479) 					ipr_erp_cancel_all(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6480) 					return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6481) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6482) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6483) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6484) 		if (!ipr_is_naca_model(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6485) 			res->needs_sync_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6486) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6487) 	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6488) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6489) 	case IPR_IOASC_IR_NON_OPTIMIZED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6490) 		if (res->raw_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6491) 			res->raw_mode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6492) 			scsi_cmd->result |= (DID_IMM_RETRY << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6493) 		} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6494) 			scsi_cmd->result |= (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6495) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6496) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6497) 		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6498) 			scsi_cmd->result |= (DID_ERROR << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6499) 		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6500) 			res->needs_sync_complete = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6501) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6502) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6504) 	scsi_dma_unmap(ipr_cmd->scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6505) 	scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6506) 	if (ipr_cmd->eh_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6507) 		complete(ipr_cmd->eh_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6508) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6509) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6511) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6512)  * ipr_scsi_done - mid-layer done function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6513)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6514)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6515)  * This function is invoked by the interrupt handler for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6516)  * ops generated by the SCSI mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6517)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6518)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6519)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6520)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6521) static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6522) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6523) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6524) 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6525) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6526) 	unsigned long lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6528) 	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6530) 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6531) 		scsi_dma_unmap(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6533) 		spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6534) 		scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6535) 		if (ipr_cmd->eh_comp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6536) 			complete(ipr_cmd->eh_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6537) 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6538) 		spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6539) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6540) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6541) 		spin_lock(&ipr_cmd->hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6542) 		ipr_erp_start(ioa_cfg, ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6543) 		spin_unlock(&ipr_cmd->hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6544) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6545) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6546) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6548) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6549)  * ipr_queuecommand - Queue a mid-layer request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6550)  * @shost:		scsi host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6551)  * @scsi_cmd:	scsi command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6552)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6553)  * This function queues a request generated by the mid-layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6554)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6555)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6556)  *	0 on success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6557)  *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6558)  *	SCSI_MLQUEUE_HOST_BUSY if host is busy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6559)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6560) static int ipr_queuecommand(struct Scsi_Host *shost,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6561) 			    struct scsi_cmnd *scsi_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6562) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6563) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6564) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6565) 	struct ipr_ioarcb *ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6566) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6567) 	unsigned long hrrq_flags, lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6568) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6569) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6570) 	int hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6572) 	ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6574) 	scsi_cmd->result = (DID_OK << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6575) 	res = scsi_cmd->device->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6577) 	if (ipr_is_gata(res) && res->sata_port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6578) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6579) 		rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6580) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6581) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6582) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6584) 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6585) 	hrrq = &ioa_cfg->hrrq[hrrq_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6587) 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6588) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6589) 	 * We are currently blocking all devices due to a host reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6590) 	 * We have told the host to stop giving us new requests, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6591) 	 * ERP ops don't count. FIXME
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6592) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6593) 	if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6594) 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6595) 		return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6596) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6598) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6599) 	 * FIXME - Create scsi_set_host_offline interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6600) 	 *  and the ioa_is_dead check can be removed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6601) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6602) 	if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6603) 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6604) 		goto err_nodev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6605) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6607) 	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6608) 	if (ipr_cmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6609) 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6610) 		return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6611) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6612) 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6614) 	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6615) 	ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6617) 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6618) 	ipr_cmd->scsi_cmd = scsi_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6619) 	ipr_cmd->done = ipr_scsi_eh_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6621) 	if (ipr_is_gscsi(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6622) 		if (scsi_cmd->underflow == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6623) 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6625) 		if (res->reset_occurred) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6626) 			res->reset_occurred = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6627) 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6628) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6631) 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6632) 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6634) 		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6635) 		if (scsi_cmd->flags & SCMD_TAGGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6636) 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6637) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6638) 			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6639) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6641) 	if (scsi_cmd->cmnd[0] >= 0xC0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6642) 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6643) 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6644) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6645) 	if (res->raw_mode && ipr_is_af_dasd_device(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6646) 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6648) 		if (scsi_cmd->underflow == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6649) 			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6650) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6652) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6653) 		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6654) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6655) 		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6657) 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6658) 	if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6659) 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6660) 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6661) 		if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6662) 			scsi_dma_unmap(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6663) 		return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6664) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6666) 	if (unlikely(hrrq->ioa_is_dead)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6667) 		list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6668) 		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6669) 		scsi_dma_unmap(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6670) 		goto err_nodev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6673) 	ioarcb->res_handle = res->res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6674) 	if (res->needs_sync_complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6675) 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6676) 		res->needs_sync_complete = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6678) 	list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6679) 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6680) 	ipr_send_command(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6681) 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6682) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6684) err_nodev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6685) 	spin_lock_irqsave(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6686) 	memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6687) 	scsi_cmd->result = (DID_NO_CONNECT << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6688) 	scsi_cmd->scsi_done(scsi_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6689) 	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6690) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6691) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6693) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6694)  * ipr_ioctl - IOCTL handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6695)  * @sdev:	scsi device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6696)  * @cmd:	IOCTL cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6697)  * @arg:	IOCTL arg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6698)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6699)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6700)  * 	0 on success / other on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6701)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6702) static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6703) 		     void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6704) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6705) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6707) 	res = (struct ipr_resource_entry *)sdev->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6708) 	if (res && ipr_is_gata(res)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6709) 		if (cmd == HDIO_GET_IDENTITY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6710) 			return -ENOTTY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6711) 		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6712) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6714) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6717) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6718)  * ipr_info - Get information about the card/driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6719)  * @host:	scsi host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6720)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6721)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6722)  * 	pointer to buffer with description string
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6723)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6724) static const char *ipr_ioa_info(struct Scsi_Host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6725) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6726) 	static char buffer[512];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6727) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6728) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6730) 	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6732) 	spin_lock_irqsave(host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6733) 	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6734) 	spin_unlock_irqrestore(host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6736) 	return buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6737) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6739) static struct scsi_host_template driver_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6740) 	.module = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6741) 	.name = "IPR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6742) 	.info = ipr_ioa_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6743) 	.ioctl = ipr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6744) #ifdef CONFIG_COMPAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6745) 	.compat_ioctl = ipr_ioctl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6746) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6747) 	.queuecommand = ipr_queuecommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6748) 	.dma_need_drain = ata_scsi_dma_need_drain,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6749) 	.eh_abort_handler = ipr_eh_abort,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6750) 	.eh_device_reset_handler = ipr_eh_dev_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6751) 	.eh_host_reset_handler = ipr_eh_host_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6752) 	.slave_alloc = ipr_slave_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6753) 	.slave_configure = ipr_slave_configure,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6754) 	.slave_destroy = ipr_slave_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6755) 	.scan_finished = ipr_scan_finished,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6756) 	.target_alloc = ipr_target_alloc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6757) 	.target_destroy = ipr_target_destroy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6758) 	.change_queue_depth = ipr_change_queue_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6759) 	.bios_param = ipr_biosparam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6760) 	.can_queue = IPR_MAX_COMMANDS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6761) 	.this_id = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6762) 	.sg_tablesize = IPR_MAX_SGLIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6763) 	.max_sectors = IPR_IOA_MAX_SECTORS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6764) 	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6765) 	.shost_attrs = ipr_ioa_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6766) 	.sdev_attrs = ipr_dev_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6767) 	.proc_name = IPR_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6768) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6770) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6771)  * ipr_ata_phy_reset - libata phy_reset handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6772)  * @ap:		ata port to reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6773)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6774)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6775) static void ipr_ata_phy_reset(struct ata_port *ap)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6776) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6777) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6778) 	struct ipr_sata_port *sata_port = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6779) 	struct ipr_resource_entry *res = sata_port->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6780) 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6781) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6783) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6784) 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6785) 	while (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6786) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6787) 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6788) 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6791) 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6792) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6794) 	rc = ipr_device_reset(ioa_cfg, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6796) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6797) 		ap->link.device[0].class = ATA_DEV_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6798) 		goto out_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6801) 	ap->link.device[0].class = res->ata_class;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6802) 	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6803) 		ap->link.device[0].class = ATA_DEV_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6805) out_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6806) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6807) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6808) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6810) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6811)  * ipr_ata_post_internal - Cleanup after an internal command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6812)  * @qc:	ATA queued command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6813)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6814)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6815)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6816)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6817) static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6819) 	struct ipr_sata_port *sata_port = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6820) 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6821) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6822) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6823) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6825) 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6826) 	while (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6827) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6828) 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6829) 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6830) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6832) 	for_each_hrrq(hrrq, ioa_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6833) 		spin_lock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6834) 		list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6835) 			if (ipr_cmd->qc == qc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6836) 				ipr_device_reset(ioa_cfg, sata_port->res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6837) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6838) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6839) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6840) 		spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6842) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6845) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6846)  * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6847)  * @regs:	destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6848)  * @tf:	source ATA taskfile
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6849)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6850)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6851)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6852)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6853) static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6854) 			     struct ata_taskfile *tf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6855) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6856) 	regs->feature = tf->feature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6857) 	regs->nsect = tf->nsect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6858) 	regs->lbal = tf->lbal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6859) 	regs->lbam = tf->lbam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6860) 	regs->lbah = tf->lbah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6861) 	regs->device = tf->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6862) 	regs->command = tf->command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6863) 	regs->hob_feature = tf->hob_feature;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6864) 	regs->hob_nsect = tf->hob_nsect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6865) 	regs->hob_lbal = tf->hob_lbal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6866) 	regs->hob_lbam = tf->hob_lbam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6867) 	regs->hob_lbah = tf->hob_lbah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6868) 	regs->ctl = tf->ctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6871) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6872)  * ipr_sata_done - done function for SATA commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6873)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6874)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6875)  * This function is invoked by the interrupt handler for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6876)  * ops generated by the SCSI mid-layer to SATA devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6877)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6878)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6879)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6880)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6881) static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6882) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6883) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6884) 	struct ata_queued_cmd *qc = ipr_cmd->qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6885) 	struct ipr_sata_port *sata_port = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6886) 	struct ipr_resource_entry *res = sata_port->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6887) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6889) 	spin_lock(&ipr_cmd->hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6890) 	if (ipr_cmd->ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6891) 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6892) 		       sizeof(struct ipr_ioasa_gata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6893) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6894) 		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6895) 		       sizeof(struct ipr_ioasa_gata));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6896) 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6898) 	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6899) 		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6901) 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6902) 		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6903) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6904) 		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6905) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6906) 	spin_unlock(&ipr_cmd->hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6907) 	ata_qc_complete(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6908) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6910) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6911)  * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6912)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6913)  * @qc:		ATA queued command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6914)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6915)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6916) static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6917) 				  struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6919) 	u32 ioadl_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6920) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6921) 	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6922) 	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6923) 	int len = qc->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6924) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6925) 	unsigned int si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6926) 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6928) 	if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6929) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6931) 	if (qc->dma_dir == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6932) 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6933) 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6934) 	} else if (qc->dma_dir == DMA_FROM_DEVICE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6935) 		ioadl_flags = IPR_IOADL_FLAGS_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6937) 	ioarcb->data_transfer_length = cpu_to_be32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6938) 	ioarcb->ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6939) 		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6940) 	ioarcb->u.sis64_addr_data.data_ioadl_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6941) 		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6943) 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6944) 		ioadl64->flags = cpu_to_be32(ioadl_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6945) 		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6946) 		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6948) 		last_ioadl64 = ioadl64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6949) 		ioadl64++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6952) 	if (likely(last_ioadl64))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6953) 		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6956) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6957)  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6958)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6959)  * @qc:		ATA queued command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6960)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6961)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6962) static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6963) 				struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6965) 	u32 ioadl_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6966) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6967) 	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6968) 	struct ipr_ioadl_desc *last_ioadl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6969) 	int len = qc->nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6970) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6971) 	unsigned int si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6973) 	if (len == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6974) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6976) 	if (qc->dma_dir == DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6977) 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6978) 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6979) 		ioarcb->data_transfer_length = cpu_to_be32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6980) 		ioarcb->ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6981) 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6982) 	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6983) 		ioadl_flags = IPR_IOADL_FLAGS_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6984) 		ioarcb->read_data_transfer_length = cpu_to_be32(len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6985) 		ioarcb->read_ioadl_len =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6986) 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6989) 	for_each_sg(qc->sg, sg, qc->n_elem, si) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6990) 		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6991) 		ioadl->address = cpu_to_be32(sg_dma_address(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6993) 		last_ioadl = ioadl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6994) 		ioadl++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6995) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6997) 	if (likely(last_ioadl))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6998) 		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  6999) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7001) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7002)  * ipr_qc_defer - Get a free ipr_cmd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7003)  * @qc:	queued command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7004)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7005)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7006)  *	0 if success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7007)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7008) static int ipr_qc_defer(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7010) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7011) 	struct ipr_sata_port *sata_port = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7012) 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7013) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7014) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7015) 	int hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7017) 	hrrq_id = ipr_get_hrrq_index(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7018) 	hrrq = &ioa_cfg->hrrq[hrrq_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7020) 	qc->lldd_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7021) 	spin_lock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7022) 	if (unlikely(hrrq->ioa_is_dead)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7023) 		spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7024) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7025) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7027) 	if (unlikely(!hrrq->allow_cmds)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7028) 		spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7029) 		return ATA_DEFER_LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7030) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7032) 	ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7033) 	if (ipr_cmd == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7034) 		spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7035) 		return ATA_DEFER_LINK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7038) 	qc->lldd_task = ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7039) 	spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7040) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7043) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7044)  * ipr_qc_issue - Issue a SATA qc to a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7045)  * @qc:	queued command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7046)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7047)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7048)  * 	0 if success
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7049)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7050) static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7051) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7052) 	struct ata_port *ap = qc->ap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7053) 	struct ipr_sata_port *sata_port = ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7054) 	struct ipr_resource_entry *res = sata_port->res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7055) 	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7056) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7057) 	struct ipr_ioarcb *ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7058) 	struct ipr_ioarcb_ata_regs *regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7060) 	if (qc->lldd_task == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7061) 		ipr_qc_defer(qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7063) 	ipr_cmd = qc->lldd_task;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7064) 	if (ipr_cmd == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7065) 		return AC_ERR_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7067) 	qc->lldd_task = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7068) 	spin_lock(&ipr_cmd->hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7069) 	if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7070) 			ipr_cmd->hrrq->ioa_is_dead)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7071) 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7072) 		spin_unlock(&ipr_cmd->hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7073) 		return AC_ERR_SYSTEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7076) 	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7077) 	ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7079) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7080) 		regs = &ipr_cmd->i.ata_ioadl.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7081) 		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7082) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7083) 		regs = &ioarcb->u.add_data.u.regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7085) 	memset(regs, 0, sizeof(*regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7086) 	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7088) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7089) 	ipr_cmd->qc = qc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7090) 	ipr_cmd->done = ipr_sata_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7091) 	ipr_cmd->ioarcb.res_handle = res->res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7092) 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7093) 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7094) 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7095) 	ipr_cmd->dma_use_sg = qc->n_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7097) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7098) 		ipr_build_ata_ioadl64(ipr_cmd, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7099) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7100) 		ipr_build_ata_ioadl(ipr_cmd, qc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7102) 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7103) 	ipr_copy_sata_tf(regs, &qc->tf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7104) 	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7105) 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7107) 	switch (qc->tf.protocol) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7108) 	case ATA_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7109) 	case ATA_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7110) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7112) 	case ATA_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7113) 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7114) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7116) 	case ATAPI_PROT_PIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7117) 	case ATAPI_PROT_NODATA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7118) 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7119) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7121) 	case ATAPI_PROT_DMA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7122) 		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7123) 		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7124) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7126) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7127) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7128) 		spin_unlock(&ipr_cmd->hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7129) 		return AC_ERR_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7130) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7132) 	ipr_send_command(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7133) 	spin_unlock(&ipr_cmd->hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7135) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7138) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7139)  * ipr_qc_fill_rtf - Read result TF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7140)  * @qc: ATA queued command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7141)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7142)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7143)  * 	true
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7144)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7145) static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7147) 	struct ipr_sata_port *sata_port = qc->ap->private_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7148) 	struct ipr_ioasa_gata *g = &sata_port->ioasa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7149) 	struct ata_taskfile *tf = &qc->result_tf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7151) 	tf->feature = g->error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7152) 	tf->nsect = g->nsect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7153) 	tf->lbal = g->lbal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7154) 	tf->lbam = g->lbam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7155) 	tf->lbah = g->lbah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7156) 	tf->device = g->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7157) 	tf->command = g->status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7158) 	tf->hob_nsect = g->hob_nsect;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7159) 	tf->hob_lbal = g->hob_lbal;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7160) 	tf->hob_lbam = g->hob_lbam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7161) 	tf->hob_lbah = g->hob_lbah;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7163) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7166) static struct ata_port_operations ipr_sata_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7167) 	.phy_reset = ipr_ata_phy_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7168) 	.hardreset = ipr_sata_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7169) 	.post_internal_cmd = ipr_ata_post_internal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7170) 	.qc_prep = ata_noop_qc_prep,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7171) 	.qc_defer = ipr_qc_defer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7172) 	.qc_issue = ipr_qc_issue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7173) 	.qc_fill_rtf = ipr_qc_fill_rtf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7174) 	.port_start = ata_sas_port_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7175) 	.port_stop = ata_sas_port_stop
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7176) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7178) static struct ata_port_info sata_port_info = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7179) 	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7180) 			  ATA_FLAG_SAS_HOST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7181) 	.pio_mask	= ATA_PIO4_ONLY,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7182) 	.mwdma_mask	= ATA_MWDMA2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7183) 	.udma_mask	= ATA_UDMA6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7184) 	.port_ops	= &ipr_sata_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7185) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7187) #ifdef CONFIG_PPC_PSERIES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7188) static const u16 ipr_blocked_processors[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7189) 	PVR_NORTHSTAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7190) 	PVR_PULSAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7191) 	PVR_POWER4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7192) 	PVR_ICESTAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7193) 	PVR_SSTAR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7194) 	PVR_POWER4p,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7195) 	PVR_630,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7196) 	PVR_630p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7197) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7199) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7200)  * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7201)  * @ioa_cfg:	ioa cfg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7202)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7203)  * Adapters that use Gemstone revision < 3.1 do not work reliably on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7204)  * certain pSeries hardware. This function determines if the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7205)  * adapter is in one of these confgurations or not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7206)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7207)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7208)  * 	1 if adapter is not supported / 0 if adapter is supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7209)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7210) static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7211) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7212) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7214) 	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7215) 		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7216) 			if (pvr_version_is(ipr_blocked_processors[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7217) 				return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7218) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7219) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7220) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7222) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7223) #define ipr_invalid_adapter(ioa_cfg) 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7224) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7226) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7227)  * ipr_ioa_bringdown_done - IOA bring down completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7228)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7229)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7230)  * This function processes the completion of an adapter bring down.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7231)  * It wakes any reset sleepers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7232)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7233)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7234)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7235)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7236) static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7237) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7238) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7239) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7241) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7242) 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7243) 		ipr_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7244) 		ioa_cfg->scsi_unblock = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7245) 		schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7248) 	ioa_cfg->in_reset_reload = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7249) 	ioa_cfg->reset_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7250) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7251) 		spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7252) 		ioa_cfg->hrrq[i].ioa_is_dead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7253) 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7254) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7255) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7257) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7258) 	wake_up_all(&ioa_cfg->reset_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7259) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7261) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7264) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7265)  * ipr_ioa_reset_done - IOA reset completion.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7266)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7267)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7268)  * This function processes the completion of an adapter reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7269)  * It schedules any necessary mid-layer add/removes and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7270)  * wakes any reset sleepers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7271)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7272)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7273)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7274)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7275) static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7276) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7277) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7278) 	struct ipr_resource_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7279) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7281) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7282) 	ioa_cfg->in_reset_reload = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7283) 	for (j = 0; j < ioa_cfg->hrrq_num; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7284) 		spin_lock(&ioa_cfg->hrrq[j]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7285) 		ioa_cfg->hrrq[j].allow_cmds = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7286) 		spin_unlock(&ioa_cfg->hrrq[j]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7287) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7288) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7289) 	ioa_cfg->reset_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7290) 	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7292) 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7293) 		if (res->add_to_ml || res->del_from_ml) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7294) 			ipr_trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7295) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7296) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7297) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7298) 	schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7300) 	for (j = 0; j < IPR_NUM_HCAMS; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7301) 		list_del_init(&ioa_cfg->hostrcb[j]->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7302) 		if (j < IPR_NUM_LOG_HCAMS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7303) 			ipr_send_hcam(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7304) 				IPR_HCAM_CDB_OP_CODE_LOG_DATA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7305) 				ioa_cfg->hostrcb[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7306) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7307) 			ipr_send_hcam(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7308) 				IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7309) 				ioa_cfg->hostrcb[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7312) 	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7313) 	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7314) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7315) 	ioa_cfg->reset_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7316) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7317) 	wake_up_all(&ioa_cfg->reset_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7319) 	ioa_cfg->scsi_unblock = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7320) 	schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7321) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7322) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7325) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7326)  * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7327)  * @supported_dev:	supported device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7328)  * @vpids:			vendor product id struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7329)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7330)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7331)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7332)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7333) static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7334) 				 struct ipr_std_inq_vpids *vpids)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7336) 	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7337) 	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7338) 	supported_dev->num_records = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7339) 	supported_dev->data_length =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7340) 		cpu_to_be16(sizeof(struct ipr_supported_device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7341) 	supported_dev->reserved = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7342) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7343) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7344) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7345)  * ipr_set_supported_devs - Send Set Supported Devices for a device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7346)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7347)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7348)  * This function sends a Set Supported Devices to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7349)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7350)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7351)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7352)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7353) static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7354) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7355) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7356) 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7357) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7358) 	struct ipr_resource_entry *res = ipr_cmd->u.res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7360) 	ipr_cmd->job_step = ipr_ioa_reset_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7362) 	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7363) 		if (!ipr_is_scsi_disk(res))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7364) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7366) 		ipr_cmd->u.res = res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7367) 		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7369) 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7370) 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7371) 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7373) 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7374) 		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7375) 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7376) 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7378) 		ipr_init_ioadl(ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7379) 			       ioa_cfg->vpd_cbs_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7380) 				 offsetof(struct ipr_misc_cbs, supp_dev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7381) 			       sizeof(struct ipr_supported_device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7382) 			       IPR_IOADL_FLAGS_WRITE_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7384) 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7385) 			   IPR_SET_SUP_DEVICE_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7387) 		if (!ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7388) 			ipr_cmd->job_step = ipr_set_supported_devs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7389) 		LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7390) 		return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7393) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7394) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7397) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7398)  * ipr_get_mode_page - Locate specified mode page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7399)  * @mode_pages:	mode page buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7400)  * @page_code:	page code to find
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7401)  * @len:		minimum required length for mode page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7402)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7403)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7404)  * 	pointer to mode page / NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7405)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7406) static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7407) 			       u32 page_code, u32 len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7408) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7409) 	struct ipr_mode_page_hdr *mode_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7410) 	u32 page_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7411) 	u32 length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7413) 	if (!mode_pages || (mode_pages->hdr.length == 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7414) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7416) 	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7417) 	mode_hdr = (struct ipr_mode_page_hdr *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7418) 		(mode_pages->data + mode_pages->hdr.block_desc_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7420) 	while (length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7421) 		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7422) 			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7423) 				return mode_hdr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7424) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7425) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7426) 			page_length = (sizeof(struct ipr_mode_page_hdr) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7427) 				       mode_hdr->page_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7428) 			length -= page_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7429) 			mode_hdr = (struct ipr_mode_page_hdr *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7430) 				((unsigned long)mode_hdr + page_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7431) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7432) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7433) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7434) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7436) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7437)  * ipr_check_term_power - Check for term power errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7438)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7439)  * @mode_pages:	IOAFP mode pages buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7440)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7441)  * Check the IOAFP's mode page 28 for term power errors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7442)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7443)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7444)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7445)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7446) static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7447) 				 struct ipr_mode_pages *mode_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7448) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7449) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7450) 	int entry_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7451) 	struct ipr_dev_bus_entry *bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7452) 	struct ipr_mode_page28 *mode_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7454) 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7455) 				      sizeof(struct ipr_mode_page28));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7457) 	entry_length = mode_page->entry_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7458) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7459) 	bus = mode_page->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7461) 	for (i = 0; i < mode_page->num_entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7462) 		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7463) 			dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7464) 				"Term power is absent on scsi bus %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7465) 				bus->res_addr.bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7466) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7468) 		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7470) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7472) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7473)  * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7474)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7475)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7476)  * Looks through the config table checking for SES devices. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7477)  * the SES device is in the SES table indicating a maximum SCSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7478)  * bus speed, the speed is limited for the bus.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7479)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7480)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7481)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7482)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7483) static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7484) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7485) 	u32 max_xfer_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7486) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7488) 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7489) 		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7490) 						       ioa_cfg->bus_attr[i].bus_width);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7492) 		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7493) 			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7494) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7497) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7498)  * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7499)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7500)  * @mode_pages:	mode page 28 buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7501)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7502)  * Updates mode page 28 based on driver configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7503)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7504)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7505)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7506)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7507) static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7508) 					  struct ipr_mode_pages *mode_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7510) 	int i, entry_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7511) 	struct ipr_dev_bus_entry *bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7512) 	struct ipr_bus_attributes *bus_attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7513) 	struct ipr_mode_page28 *mode_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7515) 	mode_page = ipr_get_mode_page(mode_pages, 0x28,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7516) 				      sizeof(struct ipr_mode_page28));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7518) 	entry_length = mode_page->entry_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7520) 	/* Loop for each device bus entry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7521) 	for (i = 0, bus = mode_page->bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7522) 	     i < mode_page->num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7523) 	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7524) 		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7525) 			dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7526) 				"Invalid resource address reported: 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7527) 				IPR_GET_PHYS_LOC(bus->res_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7528) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7529) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7530) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7531) 		bus_attr = &ioa_cfg->bus_attr[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7532) 		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7533) 		bus->bus_width = bus_attr->bus_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7534) 		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7535) 		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7536) 		if (bus_attr->qas_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7537) 			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7538) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7539) 			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7540) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7541) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7543) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7544)  * ipr_build_mode_select - Build a mode select command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7545)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7546)  * @res_handle:	resource handle to send command to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7547)  * @parm:		Byte 2 of Mode Sense command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7548)  * @dma_addr:	DMA buffer address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7549)  * @xfer_len:	data transfer length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7550)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7551)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7552)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7553)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7554) static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7555) 				  __be32 res_handle, u8 parm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7556) 				  dma_addr_t dma_addr, u8 xfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7557) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7558) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7560) 	ioarcb->res_handle = res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7561) 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7562) 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7563) 	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7564) 	ioarcb->cmd_pkt.cdb[1] = parm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7565) 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7567) 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7570) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7571)  * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7572)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7573)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7574)  * This function sets up the SCSI bus attributes and sends
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7575)  * a Mode Select for Page 28 to activate them.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7576)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7577)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7578)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7579)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7580) static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7582) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7583) 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7584) 	int length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7586) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7587) 	ipr_scsi_bus_speed_limit(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7588) 	ipr_check_term_power(ioa_cfg, mode_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7589) 	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7590) 	length = mode_pages->hdr.length + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7591) 	mode_pages->hdr.length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7593) 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7594) 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7595) 			      length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7597) 	ipr_cmd->job_step = ipr_set_supported_devs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7598) 	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7599) 				    struct ipr_resource_entry, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7600) 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7602) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7603) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7604) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7606) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7607)  * ipr_build_mode_sense - Builds a mode sense command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7608)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7609)  * @res_handle:		resource entry struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7610)  * @parm:		Byte 2 of mode sense command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7611)  * @dma_addr:	DMA address of mode sense buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7612)  * @xfer_len:	Size of DMA buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7613)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7614)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7615)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7616)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7617) static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7618) 				 __be32 res_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7619) 				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7620) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7621) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7623) 	ioarcb->res_handle = res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7624) 	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7625) 	ioarcb->cmd_pkt.cdb[2] = parm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7626) 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7627) 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7629) 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7630) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7632) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7633)  * ipr_reset_cmd_failed - Handle failure of IOA reset command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7634)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7635)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7636)  * This function handles the failure of an IOA bringup command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7637)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7638)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7639)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7640)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7641) static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7643) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7644) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7646) 	dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7647) 		"0x%02X failed with IOASC: 0x%08X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7648) 		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7650) 	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7651) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7652) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7655) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7656)  * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7657)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7658)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7659)  * This function handles the failure of a Mode Sense to the IOAFP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7660)  * Some adapters do not handle all mode pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7661)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7662)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7663)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7664)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7665) static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7667) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7668) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7670) 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7671) 		ipr_cmd->job_step = ipr_set_supported_devs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7672) 		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7673) 					    struct ipr_resource_entry, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7674) 		return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7677) 	return ipr_reset_cmd_failed(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7680) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7681)  * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7682)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7683)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7684)  * This function send a Page 28 mode sense to the IOA to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7685)  * retrieve SCSI bus attributes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7686)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7687)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7688)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7689)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7690) static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7691) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7692) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7694) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7695) 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7696) 			     0x28, ioa_cfg->vpd_cbs_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7697) 			     offsetof(struct ipr_misc_cbs, mode_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7698) 			     sizeof(struct ipr_mode_pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7700) 	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7701) 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7703) 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7705) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7706) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7707) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7709) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7710)  * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7711)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7712)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7713)  * This function enables dual IOA RAID support if possible.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7714)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7715)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7716)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7717)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7718) static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7720) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7721) 	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7722) 	struct ipr_mode_page24 *mode_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7723) 	int length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7725) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7726) 	mode_page = ipr_get_mode_page(mode_pages, 0x24,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7727) 				      sizeof(struct ipr_mode_page24));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7729) 	if (mode_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7730) 		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7732) 	length = mode_pages->hdr.length + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7733) 	mode_pages->hdr.length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7735) 	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7736) 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7737) 			      length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7739) 	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7740) 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7742) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7743) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7744) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7746) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7747)  * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7748)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7749)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7750)  * This function handles the failure of a Mode Sense to the IOAFP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7751)  * Some adapters do not handle all mode pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7752)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7753)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7754)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7755)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7756) static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7757) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7758) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7760) 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7761) 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7762) 		return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7763) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7765) 	return ipr_reset_cmd_failed(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7768) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7769)  * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7770)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7771)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7772)  * This function send a mode sense to the IOA to retrieve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7773)  * the IOA Advanced Function Control mode page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7774)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7775)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7776)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7777)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7778) static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7780) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7782) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7783) 	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7784) 			     0x24, ioa_cfg->vpd_cbs_dma +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7785) 			     offsetof(struct ipr_misc_cbs, mode_pages),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7786) 			     sizeof(struct ipr_mode_pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7788) 	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7789) 	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7791) 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7793) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7794) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7797) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7798)  * ipr_init_res_table - Initialize the resource table
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7799)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7800)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7801)  * This function looks through the existing resource table, comparing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7802)  * it with the config table. This function will take care of old/new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7803)  * devices and schedule adding/removing them from the mid-layer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7804)  * as appropriate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7805)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7806)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7807)  * 	IPR_RC_JOB_CONTINUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7808)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7809) static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7811) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7812) 	struct ipr_resource_entry *res, *temp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7813) 	struct ipr_config_table_entry_wrapper cfgtew;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7814) 	int entries, found, flag, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7815) 	LIST_HEAD(old_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7817) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7818) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7819) 		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7820) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7821) 		flag = ioa_cfg->u.cfg_table->hdr.flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7823) 	if (flag & IPR_UCODE_DOWNLOAD_REQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7824) 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7826) 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7827) 		list_move_tail(&res->queue, &old_res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7829) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7830) 		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7831) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7832) 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7834) 	for (i = 0; i < entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7835) 		if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7836) 			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7837) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7838) 			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7839) 		found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7841) 		list_for_each_entry_safe(res, temp, &old_res, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7842) 			if (ipr_is_same_device(res, &cfgtew)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7843) 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7844) 				found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7845) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7846) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7847) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7849) 		if (!found) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7850) 			if (list_empty(&ioa_cfg->free_res_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7851) 				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7852) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7853) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7855) 			found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7856) 			res = list_entry(ioa_cfg->free_res_q.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7857) 					 struct ipr_resource_entry, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7858) 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7859) 			ipr_init_res_entry(res, &cfgtew);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7860) 			res->add_to_ml = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7861) 		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7862) 			res->sdev->allow_restart = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7864) 		if (found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7865) 			ipr_update_res_entry(res, &cfgtew);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7868) 	list_for_each_entry_safe(res, temp, &old_res, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7869) 		if (res->sdev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7870) 			res->del_from_ml = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7871) 			res->res_handle = IPR_INVALID_RES_HANDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7872) 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7873) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7874) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7876) 	list_for_each_entry_safe(res, temp, &old_res, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7877) 		ipr_clear_res_target(res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7878) 		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7879) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7881) 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7882) 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7883) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7884) 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7886) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7887) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7888) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7890) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7891)  * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7892)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7893)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7894)  * This function sends a Query IOA Configuration command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7895)  * to the adapter to retrieve the IOA configuration table.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7896)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7897)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7898)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7899)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7900) static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7901) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7902) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7903) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7904) 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7905) 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7907) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7908) 	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7909) 		ioa_cfg->dual_raid = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7910) 	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7911) 		 ucode_vpd->major_release, ucode_vpd->card_type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7912) 		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7913) 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7914) 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7916) 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7917) 	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7918) 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7919) 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7921) 	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7922) 		       IPR_IOADL_FLAGS_READ_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7924) 	ipr_cmd->job_step = ipr_init_res_table;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7926) 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7928) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7929) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7932) static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7933) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7934) 	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7936) 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7937) 		return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7939) 	return ipr_reset_cmd_failed(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7940) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7942) static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7943) 					 __be32 res_handle, u8 sa_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7945) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7947) 	ioarcb->res_handle = res_handle;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7948) 	ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7949) 	ioarcb->cmd_pkt.cdb[1] = sa_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7950) 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7951) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7953) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7954)  * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7955)  * action
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7956)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7957)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7958)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7959)  *	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7960)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7961) static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7963) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7964) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7965) 	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7967) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7969) 	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7971) 	if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7972) 		ipr_build_ioa_service_action(ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7973) 					     cpu_to_be32(IPR_IOA_RES_HANDLE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7974) 					     IPR_IOA_SA_CHANGE_CACHE_PARAMS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7976) 		ioarcb->cmd_pkt.cdb[2] = 0x40;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7978) 		ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7979) 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7980) 			   IPR_SET_SUP_DEVICE_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7982) 		LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7983) 		return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7984) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7986) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7987) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7988) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7990) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7991)  * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7992)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7993)  * @flags:	flags to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7994)  * @page:	page to inquire
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7995)  * @dma_addr:	DMA address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7996)  * @xfer_len:	transfer data length
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7997)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7998)  * This utility function sends an inquiry to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  7999)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8000)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8001)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8002)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8003) static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8004) 			      dma_addr_t dma_addr, u8 xfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8005) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8006) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8008) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8009) 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8010) 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8012) 	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8013) 	ioarcb->cmd_pkt.cdb[1] = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8014) 	ioarcb->cmd_pkt.cdb[2] = page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8015) 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8017) 	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8019) 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8020) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8024)  * ipr_inquiry_page_supported - Is the given inquiry page supported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8025)  * @page0:		inquiry page 0 buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8026)  * @page:		page code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8027)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8028)  * This function determines if the specified inquiry page is supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8029)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8030)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8031)  *	1 if page is supported / 0 if not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8032)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8033) static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8035) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8037) 	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8038) 		if (page0->page[i] == page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8039) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8041) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8044) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8045)  * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8046)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8047)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8048)  * This function sends a Page 0xC4 inquiry to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8049)  * to retrieve software VPD information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8050)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8051)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8052)  *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8053)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8054) static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8056) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8057) 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8058) 	struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8060) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8061) 	ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8062) 	memset(pageC4, 0, sizeof(*pageC4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8064) 	if (ipr_inquiry_page_supported(page0, 0xC4)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8065) 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8066) 				  (ioa_cfg->vpd_cbs_dma
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8067) 				   + offsetof(struct ipr_misc_cbs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8068) 					      pageC4_data)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8069) 				  sizeof(struct ipr_inquiry_pageC4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8070) 		return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8073) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8074) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8078)  * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8079)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8080)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8081)  * This function sends a Page 0xD0 inquiry to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8082)  * to retrieve adapter capabilities.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8083)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8084)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8085)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8086)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8087) static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8089) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8090) 	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8091) 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8093) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8094) 	ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8095) 	memset(cap, 0, sizeof(*cap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8097) 	if (ipr_inquiry_page_supported(page0, 0xD0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8098) 		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8099) 				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8100) 				  sizeof(struct ipr_inquiry_cap));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8101) 		return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8102) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8104) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8105) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8108) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8109)  * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8110)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8111)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8112)  * This function sends a Page 3 inquiry to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8113)  * to retrieve software VPD information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8114)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8115)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8116)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8117)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8118) static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8120) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8122) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8124) 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8126) 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8127) 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8128) 			  sizeof(struct ipr_inquiry_page3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8130) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8131) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8132) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8134) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8135)  * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8136)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8137)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8138)  * This function sends a Page 0 inquiry to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8139)  * to retrieve supported inquiry pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8140)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8141)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8142)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8143)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8144) static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8146) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8147) 	char type[5];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8149) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8151) 	/* Grab the type out of the VPD and store it away */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8152) 	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8153) 	type[4] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8154) 	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8156) 	if (ipr_invalid_adapter(ioa_cfg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8157) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8158) 			"Adapter not supported in this hardware configuration.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8160) 		if (!ipr_testmode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8161) 			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8162) 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8163) 			list_add_tail(&ipr_cmd->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8164) 					&ioa_cfg->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8165) 			return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8166) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8167) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8169) 	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8171) 	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8172) 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8173) 			  sizeof(struct ipr_inquiry_page0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8174) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8175) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8176) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8180)  * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8181)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8182)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8183)  * This function sends a standard inquiry to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8184)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8185)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8186)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8187)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8188) static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8190) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8192) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8193) 	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8195) 	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8196) 			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8197) 			  sizeof(struct ipr_ioa_vpd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8199) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8200) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8203) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8204)  * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8205)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8206)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8207)  * This function send an Identify Host Request Response Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8208)  * command to establish the HRRQ with the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8209)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8210)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8211)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8212)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8213) static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8214) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8215) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8216) 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8217) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8219) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8220) 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8221) 	if (ioa_cfg->identify_hrrq_index == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8222) 		dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8224) 	if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8225) 		hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8227) 		ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8228) 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8230) 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8231) 		if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8232) 			ioarcb->cmd_pkt.cdb[1] = 0x1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8234) 		if (ioa_cfg->nvectors == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8235) 			ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8236) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8237) 			ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8239) 		ioarcb->cmd_pkt.cdb[2] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8240) 			((u64) hrrq->host_rrq_dma >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8241) 		ioarcb->cmd_pkt.cdb[3] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8242) 			((u64) hrrq->host_rrq_dma >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8243) 		ioarcb->cmd_pkt.cdb[4] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8244) 			((u64) hrrq->host_rrq_dma >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8245) 		ioarcb->cmd_pkt.cdb[5] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8246) 			((u64) hrrq->host_rrq_dma) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8247) 		ioarcb->cmd_pkt.cdb[7] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8248) 			((sizeof(u32) * hrrq->size) >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8249) 		ioarcb->cmd_pkt.cdb[8] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8250) 			(sizeof(u32) * hrrq->size) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8252) 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8253) 			ioarcb->cmd_pkt.cdb[9] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8254) 					ioa_cfg->identify_hrrq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8256) 		if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8257) 			ioarcb->cmd_pkt.cdb[10] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8258) 				((u64) hrrq->host_rrq_dma >> 56) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8259) 			ioarcb->cmd_pkt.cdb[11] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8260) 				((u64) hrrq->host_rrq_dma >> 48) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8261) 			ioarcb->cmd_pkt.cdb[12] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8262) 				((u64) hrrq->host_rrq_dma >> 40) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8263) 			ioarcb->cmd_pkt.cdb[13] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8264) 				((u64) hrrq->host_rrq_dma >> 32) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8265) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8267) 		if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8268) 			ioarcb->cmd_pkt.cdb[14] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8269) 					ioa_cfg->identify_hrrq_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8271) 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8272) 			   IPR_INTERNAL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8274) 		if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8275) 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8277) 		LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8278) 		return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8281) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8282) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8283) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8285) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8286)  * ipr_reset_timer_done - Adapter reset timer function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8287)  * @t: Timer context used to fetch ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8288)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8289)  * Description: This function is used in adapter reset processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8290)  * for timing events. If the reset_cmd pointer in the IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8291)  * config struct is not this adapter's we are doing nested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8292)  * resets and fail_all_ops will take care of freeing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8293)  * command block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8294)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8295)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8296)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8297)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8298) static void ipr_reset_timer_done(struct timer_list *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8299) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8300) 	struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8301) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8302) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8304) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8306) 	if (ioa_cfg->reset_cmd == ipr_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8307) 		list_del(&ipr_cmd->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8308) 		ipr_cmd->done(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8309) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8311) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8314) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8315)  * ipr_reset_start_timer - Start a timer for adapter reset job
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8316)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8317)  * @timeout:	timeout value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8318)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8319)  * Description: This function is used in adapter reset processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8320)  * for timing events. If the reset_cmd pointer in the IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8321)  * config struct is not this adapter's we are doing nested
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8322)  * resets and fail_all_ops will take care of freeing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8323)  * command block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8324)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8325)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8326)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8327)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8328) static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8329) 				  unsigned long timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8330) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8332) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8333) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8334) 	ipr_cmd->done = ipr_reset_ioa_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8336) 	ipr_cmd->timer.expires = jiffies + timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8337) 	ipr_cmd->timer.function = ipr_reset_timer_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8338) 	add_timer(&ipr_cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8339) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8341) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8342)  * ipr_init_ioa_mem - Initialize ioa_cfg control block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8343)  * @ioa_cfg:	ioa cfg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8344)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8345)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8346)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8347)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8348) static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8349) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8350) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8352) 	for_each_hrrq(hrrq, ioa_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8353) 		spin_lock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8354) 		memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8356) 		/* Initialize Host RRQ pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8357) 		hrrq->hrrq_start = hrrq->host_rrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8358) 		hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8359) 		hrrq->hrrq_curr = hrrq->hrrq_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8360) 		hrrq->toggle_bit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8361) 		spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8363) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8365) 	ioa_cfg->identify_hrrq_index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8366) 	if (ioa_cfg->hrrq_num == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8367) 		atomic_set(&ioa_cfg->hrrq_index, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8368) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8369) 		atomic_set(&ioa_cfg->hrrq_index, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8371) 	/* Zero out config table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8372) 	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8375) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8376)  * ipr_reset_next_stage - Process IPL stage change based on feedback register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8377)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8378)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8379)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8380)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8381)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8382) static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8384) 	unsigned long stage, stage_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8385) 	u32 feedback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8386) 	volatile u32 int_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8387) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8388) 	u64 maskval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8390) 	feedback = readl(ioa_cfg->regs.init_feedback_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8391) 	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8392) 	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8394) 	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8396) 	/* sanity check the stage_time value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8397) 	if (stage_time == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8398) 		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8399) 	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8400) 		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8401) 	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8402) 		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8403) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8404) 	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8405) 		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8406) 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8407) 		stage_time = ioa_cfg->transop_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8408) 		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8409) 	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8410) 		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8411) 		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8412) 			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8413) 			maskval = IPR_PCII_IPL_STAGE_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8414) 			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8415) 			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8416) 			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8417) 			return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8418) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8419) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8421) 	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8422) 	ipr_cmd->timer.function = ipr_oper_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8423) 	ipr_cmd->done = ipr_reset_ioa_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8424) 	add_timer(&ipr_cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8426) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8428) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8431) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8432)  * ipr_reset_enable_ioa - Enable the IOA following a reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8433)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8434)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8435)  * This function reinitializes some control blocks and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8436)  * enables destructive diagnostics on the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8437)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8438)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8439)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8440)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8441) static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8443) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8444) 	volatile u32 int_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8445) 	volatile u64 maskval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8446) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8448) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8449) 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8450) 	ipr_init_ioa_mem(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8452) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8453) 		spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8454) 		ioa_cfg->hrrq[i].allow_interrupts = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8455) 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8457) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8458) 		/* Set the adapter to the correct endian mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8459) 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8460) 		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8461) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8462) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8463) 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8465) 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8466) 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8467) 		       ioa_cfg->regs.clr_interrupt_mask_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8468) 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8469) 		return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8470) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8472) 	/* Enable destructive diagnostics on IOA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8473) 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8475) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8476) 		maskval = IPR_PCII_IPL_STAGE_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8477) 		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8478) 		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8479) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8480) 		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8482) 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8484) 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8486) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8487) 		ipr_cmd->job_step = ipr_reset_next_stage;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8488) 		return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8489) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8490) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8491) 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8492) 	ipr_cmd->timer.function = ipr_oper_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8493) 	ipr_cmd->done = ipr_reset_ioa_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8494) 	add_timer(&ipr_cmd->timer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8495) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8497) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8498) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8499) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8501) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8502)  * ipr_reset_wait_for_dump - Wait for a dump to timeout.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8503)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8504)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8505)  * This function is invoked when an adapter dump has run out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8506)  * of processing time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8507)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8508)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8509)  * 	IPR_RC_JOB_CONTINUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8510)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8511) static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8513) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8515) 	if (ioa_cfg->sdt_state == GET_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8516) 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8517) 	else if (ioa_cfg->sdt_state == READ_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8518) 		ioa_cfg->sdt_state = ABORT_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8520) 	ioa_cfg->dump_timeout = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8521) 	ipr_cmd->job_step = ipr_reset_alert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8523) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8526) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8527)  * ipr_unit_check_no_data - Log a unit check/no data error log
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8528)  * @ioa_cfg:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8529)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8530)  * Logs an error indicating the adapter unit checked, but for some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8531)  * reason, we were unable to fetch the unit check buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8532)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8533)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8534)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8535)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8536) static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8537) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8538) 	ioa_cfg->errors_logged++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8539) 	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8542) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8543)  * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8544)  * @ioa_cfg:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8545)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8546)  * Fetches the unit check buffer from the adapter by clocking the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8547)  * through the mailbox register.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8548)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8549)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8550)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8551)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8552) static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8553) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8554) 	unsigned long mailbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8555) 	struct ipr_hostrcb *hostrcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8556) 	struct ipr_uc_sdt sdt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8557) 	int rc, length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8558) 	u32 ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8560) 	mailbox = readl(ioa_cfg->ioa_mailbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8562) 	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8563) 		ipr_unit_check_no_data(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8564) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8567) 	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8568) 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8569) 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8571) 	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8572) 	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8573) 	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8574) 		ipr_unit_check_no_data(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8575) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8578) 	/* Find length of the first sdt entry (UC buffer) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8579) 	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8580) 		length = be32_to_cpu(sdt.entry[0].end_token);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8581) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8582) 		length = (be32_to_cpu(sdt.entry[0].end_token) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8583) 			  be32_to_cpu(sdt.entry[0].start_token)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8584) 			  IPR_FMT2_MBX_ADDR_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8586) 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8587) 			     struct ipr_hostrcb, queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8588) 	list_del_init(&hostrcb->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8589) 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8591) 	rc = ipr_get_ldump_data_section(ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8592) 					be32_to_cpu(sdt.entry[0].start_token),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8593) 					(__be32 *)&hostrcb->hcam,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8594) 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8596) 	if (!rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8597) 		ipr_handle_log_data(ioa_cfg, hostrcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8598) 		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8599) 		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8600) 		    ioa_cfg->sdt_state == GET_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8601) 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8602) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8603) 		ipr_unit_check_no_data(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8605) 	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8606) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8608) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8609)  * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8610)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8611)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8612)  * Description: This function will call to get the unit check buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8613)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8614)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8615)  *	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8616)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8617) static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8619) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8621) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8622) 	ioa_cfg->ioa_unit_checked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8623) 	ipr_get_unit_check_buffer(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8624) 	ipr_cmd->job_step = ipr_reset_alert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8625) 	ipr_reset_start_timer(ipr_cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8627) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8628) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8629) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8631) static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8632) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8633) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8635) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8637) 	if (ioa_cfg->sdt_state != GET_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8638) 		return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8640) 	if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8641) 	    (readl(ioa_cfg->regs.sense_interrupt_reg) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8642) 	     IPR_PCII_MAILBOX_STABLE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8644) 		if (!ipr_cmd->u.time_left)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8645) 			dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8646) 				"Timed out waiting for Mailbox register.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8648) 		ioa_cfg->sdt_state = READ_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8649) 		ioa_cfg->dump_timeout = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8650) 		if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8651) 			ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8652) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8653) 			ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8654) 		ipr_cmd->job_step = ipr_reset_wait_for_dump;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8655) 		schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8657) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8658) 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8659) 		ipr_reset_start_timer(ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8660) 				      IPR_CHECK_FOR_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8661) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8663) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8664) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8667) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8668)  * ipr_reset_restore_cfg_space - Restore PCI config space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8669)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8670)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8671)  * Description: This function restores the saved PCI config space of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8672)  * the adapter, fails all outstanding ops back to the callers, and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8673)  * fetches the dump/unit check if applicable to this reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8674)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8675)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8676)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8677)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8678) static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8679) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8680) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8682) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8683) 	ioa_cfg->pdev->state_saved = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8684) 	pci_restore_state(ioa_cfg->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8686) 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8687) 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8688) 		return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8689) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8691) 	ipr_fail_all_ops(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8693) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8694) 		/* Set the adapter to the correct endian mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8695) 		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8696) 		readl(ioa_cfg->regs.endian_swap_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8697) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8699) 	if (ioa_cfg->ioa_unit_checked) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8700) 		if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8701) 			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8702) 			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8703) 			return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8704) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8705) 			ioa_cfg->ioa_unit_checked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8706) 			ipr_get_unit_check_buffer(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8707) 			ipr_cmd->job_step = ipr_reset_alert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8708) 			ipr_reset_start_timer(ipr_cmd, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8709) 			return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8710) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8711) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8713) 	if (ioa_cfg->in_ioa_bringdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8714) 		ipr_cmd->job_step = ipr_ioa_bringdown_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8715) 	} else if (ioa_cfg->sdt_state == GET_DUMP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8716) 		ipr_cmd->job_step = ipr_dump_mailbox_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8717) 		ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8718) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8719) 		ipr_cmd->job_step = ipr_reset_enable_ioa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8720) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8722) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8723) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8724) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8726) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8727)  * ipr_reset_bist_done - BIST has completed on the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8728)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8729)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8730)  * Description: Unblock config space and resume the reset process.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8731)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8732)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8733)  * 	IPR_RC_JOB_CONTINUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8734)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8735) static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8737) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8739) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8740) 	if (ioa_cfg->cfg_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8741) 		pci_cfg_access_unlock(ioa_cfg->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8742) 	ioa_cfg->cfg_locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8743) 	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8744) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8745) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8746) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8748) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8749)  * ipr_reset_start_bist - Run BIST on the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8750)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8751)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8752)  * Description: This function runs BIST on the adapter, then delays 2 seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8753)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8754)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8755)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8756)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8757) static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8758) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8759) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8760) 	int rc = PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8762) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8763) 	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8764) 		writel(IPR_UPROCI_SIS64_START_BIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8765) 		       ioa_cfg->regs.set_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8766) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8767) 		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8769) 	if (rc == PCIBIOS_SUCCESSFUL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8770) 		ipr_cmd->job_step = ipr_reset_bist_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8771) 		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8772) 		rc = IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8773) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8774) 		if (ioa_cfg->cfg_locked)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8775) 			pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8776) 		ioa_cfg->cfg_locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8777) 		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8778) 		rc = IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8779) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8781) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8782) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8783) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8785) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8786)  * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8787)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8788)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8789)  * Description: This clears PCI reset to the adapter and delays two seconds.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8790)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8791)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8792)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8793)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8794) static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8795) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8796) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8797) 	ipr_cmd->job_step = ipr_reset_bist_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8798) 	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8799) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8800) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8803) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8804)  * ipr_reset_reset_work - Pulse a PCIe fundamental reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8805)  * @work:	work struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8806)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8807)  * Description: This pulses warm reset to a slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8808)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8809)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8810) static void ipr_reset_reset_work(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8811) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8812) 	struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8813) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8814) 	struct pci_dev *pdev = ioa_cfg->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8815) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8817) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8818) 	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8819) 	msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8820) 	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8822) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8823) 	if (ioa_cfg->reset_cmd == ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8824) 		ipr_reset_ioa_job(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8825) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8826) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8827) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8829) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8830)  * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8831)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8832)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8833)  * Description: This asserts PCI reset to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8834)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8835)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8836)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8837)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8838) static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8840) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8842) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8843) 	INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8844) 	queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8845) 	ipr_cmd->job_step = ipr_reset_slot_reset_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8846) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8847) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8850) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8851)  * ipr_reset_block_config_access_wait - Wait for permission to block config access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8852)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8853)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8854)  * Description: This attempts to block config access to the IOA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8855)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8856)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8857)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8858)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8859) static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8861) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8862) 	int rc = IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8864) 	if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8865) 		ioa_cfg->cfg_locked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8866) 		ipr_cmd->job_step = ioa_cfg->reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8867) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8868) 		if (ipr_cmd->u.time_left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8869) 			rc = IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8870) 			ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8871) 			ipr_reset_start_timer(ipr_cmd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8872) 					      IPR_CHECK_FOR_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8873) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8874) 			ipr_cmd->job_step = ioa_cfg->reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8875) 			dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8876) 				"Timed out waiting to lock config access. Resetting anyway.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8877) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8878) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8880) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8881) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8883) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8884)  * ipr_reset_block_config_access - Block config access to the IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8885)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8886)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8887)  * Description: This attempts to block config access to the IOA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8888)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8889)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8890)  * 	IPR_RC_JOB_CONTINUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8891)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8892) static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8893) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8894) 	ipr_cmd->ioa_cfg->cfg_locked = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8895) 	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8896) 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8897) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8898) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8900) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8901)  * ipr_reset_allowed - Query whether or not IOA can be reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8902)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8903)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8904)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8905)  * 	0 if reset not allowed / non-zero if reset is allowed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8906)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8907) static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8909) 	volatile u32 temp_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8911) 	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8912) 	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8913) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8915) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8916)  * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8917)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8918)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8919)  * Description: This function waits for adapter permission to run BIST,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8920)  * then runs BIST. If the adapter does not give permission after a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8921)  * reasonable time, we will reset the adapter anyway. The impact of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8922)  * resetting the adapter without warning the adapter is the risk of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8923)  * losing the persistent error log on the adapter. If the adapter is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8924)  * reset while it is writing to the flash on the adapter, the flash
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8925)  * segment will have bad ECC and be zeroed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8926)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8927)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8928)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8929)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8930) static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8931) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8932) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8933) 	int rc = IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8935) 	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8936) 		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8937) 		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8938) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8939) 		ipr_cmd->job_step = ipr_reset_block_config_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8940) 		rc = IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8941) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8943) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8944) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8946) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8947)  * ipr_reset_alert - Alert the adapter of a pending reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8948)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8949)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8950)  * Description: This function alerts the adapter that it will be reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8951)  * If memory space is not currently enabled, proceed directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8952)  * to running BIST on the adapter. The timer must always be started
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8953)  * so we guarantee we do not run BIST from ipr_isr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8954)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8955)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8956)  * 	IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8957)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8958) static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8960) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8961) 	u16 cmd_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8962) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8964) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8965) 	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8967) 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8968) 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8969) 		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8970) 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8971) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8972) 		ipr_cmd->job_step = ipr_reset_block_config_access;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8973) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8975) 	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8976) 	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8978) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8979) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8982) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8983)  * ipr_reset_quiesce_done - Complete IOA disconnect
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8984)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8985)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8986)  * Description: Freeze the adapter to complete quiesce processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8987)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8988)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8989)  * 	IPR_RC_JOB_CONTINUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8990)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8991) static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8992) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8993) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8995) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8996) 	ipr_cmd->job_step = ipr_ioa_bringdown_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8997) 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8998) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  8999) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9002) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9003)  * ipr_reset_cancel_hcam_done - Check for outstanding commands
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9004)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9005)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9006)  * Description: Ensure nothing is outstanding to the IOA and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9007)  *			proceed with IOA disconnect. Otherwise reset the IOA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9008)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9009)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9010)  * 	IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9011)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9012) static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9013) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9014) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9015) 	struct ipr_cmnd *loop_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9016) 	struct ipr_hrr_queue *hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9017) 	int rc = IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9018) 	int count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9020) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9021) 	ipr_cmd->job_step = ipr_reset_quiesce_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9023) 	for_each_hrrq(hrrq, ioa_cfg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9024) 		spin_lock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9025) 		list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9026) 			count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9027) 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9028) 			list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9029) 			rc = IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9030) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9031) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9032) 		spin_unlock(&hrrq->_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9034) 		if (count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9035) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9038) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9039) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9042) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9043)  * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9044)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9045)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9046)  * Description: Cancel any oustanding HCAMs to the IOA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9047)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9048)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9049)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9050)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9051) static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9053) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9054) 	int rc = IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9055) 	struct ipr_cmd_pkt *cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9056) 	struct ipr_cmnd *hcam_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9057) 	struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9059) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9060) 	ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9062) 	if (!hrrq->ioa_is_dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9063) 		if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9064) 			list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9065) 				if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9066) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9068) 				ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9069) 				ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9070) 				cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9071) 				cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9072) 				cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9073) 				cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9074) 				cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9075) 				cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9076) 				cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9077) 				cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9078) 				cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9079) 				cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9080) 				cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9081) 				cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9083) 				ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9084) 					   IPR_CANCEL_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9086) 				rc = IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9087) 				ipr_cmd->job_step = ipr_reset_cancel_hcam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9088) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9089) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9090) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9091) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9092) 		ipr_cmd->job_step = ipr_reset_alert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9094) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9095) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9096) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9098) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9099)  * ipr_reset_ucode_download_done - Microcode download completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9100)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9101)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9102)  * Description: This function unmaps the microcode download buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9103)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9104)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9105)  * 	IPR_RC_JOB_CONTINUE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9106)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9107) static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9108) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9109) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9110) 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9112) 	dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9113) 		     sglist->num_sg, DMA_TO_DEVICE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9115) 	ipr_cmd->job_step = ipr_reset_alert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9116) 	return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9119) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9120)  * ipr_reset_ucode_download - Download microcode to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9121)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9122)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9123)  * Description: This function checks to see if it there is microcode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9124)  * to download to the adapter. If there is, a download is performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9125)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9126)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9127)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9128)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9129) static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9131) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9132) 	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9134) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9135) 	ipr_cmd->job_step = ipr_reset_alert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9137) 	if (!sglist)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9138) 		return IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9140) 	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9141) 	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9142) 	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9143) 	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9144) 	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9145) 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9146) 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9148) 	if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9149) 		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9150) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9151) 		ipr_build_ucode_ioadl(ipr_cmd, sglist);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9152) 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9154) 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9155) 		   IPR_WRITE_BUFFER_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9157) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9158) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9161) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9162)  * ipr_reset_shutdown_ioa - Shutdown the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9163)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9164)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9165)  * Description: This function issues an adapter shutdown of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9166)  * specified type to the specified adapter as part of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9167)  * adapter reset job.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9168)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9169)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9170)  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9171)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9172) static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9173) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9174) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9175) 	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9176) 	unsigned long timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9177) 	int rc = IPR_RC_JOB_CONTINUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9179) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9180) 	if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9181) 		ipr_cmd->job_step = ipr_reset_cancel_hcam;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9182) 	else if (shutdown_type != IPR_SHUTDOWN_NONE &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9183) 			!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9184) 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9185) 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9186) 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9187) 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9189) 		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9190) 			timeout = IPR_SHUTDOWN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9191) 		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9192) 			timeout = IPR_INTERNAL_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9193) 		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9194) 			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9195) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9196) 			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9198) 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9200) 		rc = IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9201) 		ipr_cmd->job_step = ipr_reset_ucode_download;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9202) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9203) 		ipr_cmd->job_step = ipr_reset_alert;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9205) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9206) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9209) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9210)  * ipr_reset_ioa_job - Adapter reset job
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9211)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9212)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9213)  * Description: This function is the job router for the adapter reset job.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9214)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9215)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9216)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9217)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9218) static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9219) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9220) 	u32 rc, ioasc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9221) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9223) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9224) 		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9226) 		if (ioa_cfg->reset_cmd != ipr_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9227) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9228) 			 * We are doing nested adapter resets and this is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9229) 			 * not the current reset job.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9230) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9231) 			list_add_tail(&ipr_cmd->queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9232) 					&ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9233) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9234) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9236) 		if (IPR_IOASC_SENSE_KEY(ioasc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9237) 			rc = ipr_cmd->job_step_failed(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9238) 			if (rc == IPR_RC_JOB_RETURN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9239) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9240) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9242) 		ipr_reinit_ipr_cmnd(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9243) 		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9244) 		rc = ipr_cmd->job_step(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9245) 	} while (rc == IPR_RC_JOB_CONTINUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9248) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9249)  * _ipr_initiate_ioa_reset - Initiate an adapter reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9250)  * @ioa_cfg:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9251)  * @job_step:		first job step of reset job
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9252)  * @shutdown_type:	shutdown type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9253)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9254)  * Description: This function will initiate the reset of the given adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9255)  * starting at the selected job step.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9256)  * If the caller needs to wait on the completion of the reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9257)  * the caller must sleep on the reset_wait_q.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9258)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9259)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9260)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9261)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9262) static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9263) 				    int (*job_step) (struct ipr_cmnd *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9264) 				    enum ipr_shutdown_type shutdown_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9266) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9267) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9269) 	ioa_cfg->in_reset_reload = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9270) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9271) 		spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9272) 		ioa_cfg->hrrq[i].allow_cmds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9273) 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9275) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9276) 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9277) 		ioa_cfg->scsi_unblock = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9278) 		ioa_cfg->scsi_blocked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9279) 		scsi_block_requests(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9280) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9282) 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9283) 	ioa_cfg->reset_cmd = ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9284) 	ipr_cmd->job_step = job_step;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9285) 	ipr_cmd->u.shutdown_type = shutdown_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9287) 	ipr_reset_ioa_job(ipr_cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9290) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9291)  * ipr_initiate_ioa_reset - Initiate an adapter reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9292)  * @ioa_cfg:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9293)  * @shutdown_type:	shutdown type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9294)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9295)  * Description: This function will initiate the reset of the given adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9296)  * If the caller needs to wait on the completion of the reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9297)  * the caller must sleep on the reset_wait_q.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9298)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9299)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9300)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9301)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9302) static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9303) 				   enum ipr_shutdown_type shutdown_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9304) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9305) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9307) 	if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9308) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9309) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9310) 	if (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9311) 		if (ioa_cfg->sdt_state == GET_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9312) 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9313) 		else if (ioa_cfg->sdt_state == READ_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9314) 			ioa_cfg->sdt_state = ABORT_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9315) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9317) 	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9318) 		dev_err(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9319) 			"IOA taken offline - error recovery failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9321) 		ioa_cfg->reset_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9322) 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9323) 			spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9324) 			ioa_cfg->hrrq[i].ioa_is_dead = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9325) 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9326) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9327) 		wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9329) 		if (ioa_cfg->in_ioa_bringdown) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9330) 			ioa_cfg->reset_cmd = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9331) 			ioa_cfg->in_reset_reload = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9332) 			ipr_fail_all_ops(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9333) 			wake_up_all(&ioa_cfg->reset_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9335) 			if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9336) 				ioa_cfg->scsi_unblock = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9337) 				schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9338) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9339) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9340) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9341) 			ioa_cfg->in_ioa_bringdown = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9342) 			shutdown_type = IPR_SHUTDOWN_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9343) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9344) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9346) 	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9347) 				shutdown_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9348) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9350) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9351)  * ipr_reset_freeze - Hold off all I/O activity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9352)  * @ipr_cmd:	ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9353)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9354)  * Description: If the PCI slot is frozen, hold off all I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9355)  * activity; then, as soon as the slot is available again,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9356)  * initiate an adapter reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9357)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9358) static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9360) 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9361) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9363) 	/* Disallow new interrupts, avoid loop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9364) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9365) 		spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9366) 		ioa_cfg->hrrq[i].allow_interrupts = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9367) 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9369) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9370) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9371) 	ipr_cmd->done = ipr_reset_ioa_job;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9372) 	return IPR_RC_JOB_RETURN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9375) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9376)  * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9377)  * @pdev:	PCI device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9378)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9379)  * Description: This routine is called to tell us that the MMIO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9380)  * access to the IOA has been restored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9381)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9382) static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9383) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9384) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9385) 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9387) 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9388) 	if (!ioa_cfg->probe_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9389) 		pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9390) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9391) 	return PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9394) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9395)  * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9396)  * @pdev:	PCI device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9397)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9398)  * Description: This routine is called to tell us that the PCI bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9399)  * is down. Can't do anything here, except put the device driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9400)  * into a holding pattern, waiting for the PCI bus to come back.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9401)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9402) static void ipr_pci_frozen(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9403) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9404) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9405) 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9407) 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9408) 	if (ioa_cfg->probe_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9409) 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9410) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9413) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9414)  * ipr_pci_slot_reset - Called when PCI slot has been reset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9415)  * @pdev:	PCI device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9416)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9417)  * Description: This routine is called by the pci error recovery
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9418)  * code after the PCI slot has been reset, just before we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9419)  * should resume normal operations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9420)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9421) static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9423) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9424) 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9426) 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9427) 	if (ioa_cfg->probe_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9428) 		if (ioa_cfg->needs_warm_reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9429) 			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9430) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9431) 			_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9432) 						IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9433) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9434) 		wake_up_all(&ioa_cfg->eeh_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9435) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9436) 	return PCI_ERS_RESULT_RECOVERED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9437) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9439) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9440)  * ipr_pci_perm_failure - Called when PCI slot is dead for good.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9441)  * @pdev:	PCI device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9442)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9443)  * Description: This routine is called when the PCI bus has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9444)  * permanently failed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9445)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9446) static void ipr_pci_perm_failure(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9447) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9448) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9449) 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9450) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9452) 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9453) 	if (ioa_cfg->probe_done) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9454) 		if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9455) 			ioa_cfg->sdt_state = ABORT_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9456) 		ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9457) 		ioa_cfg->in_ioa_bringdown = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9458) 		for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9459) 			spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9460) 			ioa_cfg->hrrq[i].allow_cmds = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9461) 			spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9462) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9463) 		wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9464) 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9465) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9466) 		wake_up_all(&ioa_cfg->eeh_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9467) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9468) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9470) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9471)  * ipr_pci_error_detected - Called when a PCI error is detected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9472)  * @pdev:	PCI device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9473)  * @state:	PCI channel state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9474)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9475)  * Description: Called when a PCI error is detected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9476)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9477)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9478)  * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9479)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9480) static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9481) 					       pci_channel_state_t state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9483) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9484) 	case pci_channel_io_frozen:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9485) 		ipr_pci_frozen(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9486) 		return PCI_ERS_RESULT_CAN_RECOVER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9487) 	case pci_channel_io_perm_failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9488) 		ipr_pci_perm_failure(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9489) 		return PCI_ERS_RESULT_DISCONNECT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9490) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9491) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9492) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9493) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9494) 	return PCI_ERS_RESULT_NEED_RESET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9495) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9497) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9498)  * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9499)  * @ioa_cfg:	ioa cfg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9500)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9501)  * Description: This is the second phase of adapter initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9502)  * This function takes care of initilizing the adapter to the point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9503)  * where it can accept new commands.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9504)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9505)  * 	0 on success / -EIO on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9506)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9507) static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9509) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9510) 	unsigned long host_lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9512) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9513) 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9514) 	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9515) 	ioa_cfg->probe_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9516) 	if (ioa_cfg->needs_hard_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9517) 		ioa_cfg->needs_hard_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9518) 		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9519) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9520) 		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9521) 					IPR_SHUTDOWN_NONE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9522) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9524) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9525) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9528) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9529)  * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9530)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9531)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9532)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9533)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9534)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9535) static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9536) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9537) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9539) 	if (ioa_cfg->ipr_cmnd_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9540) 		for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9541) 			if (ioa_cfg->ipr_cmnd_list[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9542) 				dma_pool_free(ioa_cfg->ipr_cmd_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9543) 					      ioa_cfg->ipr_cmnd_list[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9544) 					      ioa_cfg->ipr_cmnd_list_dma[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9546) 			ioa_cfg->ipr_cmnd_list[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9547) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9548) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9550) 	dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9552) 	kfree(ioa_cfg->ipr_cmnd_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9553) 	kfree(ioa_cfg->ipr_cmnd_list_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9554) 	ioa_cfg->ipr_cmnd_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9555) 	ioa_cfg->ipr_cmnd_list_dma = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9556) 	ioa_cfg->ipr_cmd_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9559) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9560)  * ipr_free_mem - Frees memory allocated for an adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9561)  * @ioa_cfg:	ioa cfg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9562)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9563)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9564)  * 	nothing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9565)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9566) static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9567) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9568) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9570) 	kfree(ioa_cfg->res_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9571) 	dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9572) 			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9573) 	ipr_free_cmd_blks(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9575) 	for (i = 0; i < ioa_cfg->hrrq_num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9576) 		dma_free_coherent(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9577) 				  sizeof(u32) * ioa_cfg->hrrq[i].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9578) 				  ioa_cfg->hrrq[i].host_rrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9579) 				  ioa_cfg->hrrq[i].host_rrq_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9581) 	dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9582) 			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9584) 	for (i = 0; i < IPR_MAX_HCAMS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9585) 		dma_free_coherent(&ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9586) 				  sizeof(struct ipr_hostrcb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9587) 				  ioa_cfg->hostrcb[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9588) 				  ioa_cfg->hostrcb_dma[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9589) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9591) 	ipr_free_dump(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9592) 	kfree(ioa_cfg->trace);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9595) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9596)  * ipr_free_irqs - Free all allocated IRQs for the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9597)  * @ioa_cfg:	ipr cfg struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9598)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9599)  * This function frees all allocated IRQs for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9600)  * specified adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9601)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9602)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9603)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9604)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9605) static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9607) 	struct pci_dev *pdev = ioa_cfg->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9608) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9610) 	for (i = 0; i < ioa_cfg->nvectors; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9611) 		free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9612) 	pci_free_irq_vectors(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9613) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9615) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9616)  * ipr_free_all_resources - Free all allocated resources for an adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9617)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9618)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9619)  * This function frees all allocated resources for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9620)  * specified adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9621)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9622)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9623)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9624)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9625) static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9626) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9627) 	struct pci_dev *pdev = ioa_cfg->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9629) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9630) 	ipr_free_irqs(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9631) 	if (ioa_cfg->reset_work_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9632) 		destroy_workqueue(ioa_cfg->reset_work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9633) 	iounmap(ioa_cfg->hdw_dma_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9634) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9635) 	ipr_free_mem(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9636) 	scsi_host_put(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9637) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9638) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9639) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9641) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9642)  * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9643)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9644)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9645)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9646)  * 	0 on success / -ENOMEM on allocation failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9647)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9648) static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9650) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9651) 	struct ipr_ioarcb *ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9652) 	dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9653) 	int i, entries_each_hrrq, hrrq_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9655) 	ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9656) 						sizeof(struct ipr_cmnd), 512, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9658) 	if (!ioa_cfg->ipr_cmd_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9659) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9661) 	ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9662) 	ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9664) 	if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9665) 		ipr_free_cmd_blks(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9666) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9667) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9669) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9670) 		if (ioa_cfg->hrrq_num > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9671) 			if (i == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9672) 				entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9673) 				ioa_cfg->hrrq[i].min_cmd_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9674) 				ioa_cfg->hrrq[i].max_cmd_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9675) 					(entries_each_hrrq - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9676) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9677) 				entries_each_hrrq =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9678) 					IPR_NUM_BASE_CMD_BLKS/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9679) 					(ioa_cfg->hrrq_num - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9680) 				ioa_cfg->hrrq[i].min_cmd_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9681) 					IPR_NUM_INTERNAL_CMD_BLKS +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9682) 					(i - 1) * entries_each_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9683) 				ioa_cfg->hrrq[i].max_cmd_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9684) 					(IPR_NUM_INTERNAL_CMD_BLKS +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9685) 					i * entries_each_hrrq - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9686) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9687) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9688) 			entries_each_hrrq = IPR_NUM_CMD_BLKS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9689) 			ioa_cfg->hrrq[i].min_cmd_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9690) 			ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9691) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9692) 		ioa_cfg->hrrq[i].size = entries_each_hrrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9693) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9694) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9695) 	BUG_ON(ioa_cfg->hrrq_num == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9697) 	i = IPR_NUM_CMD_BLKS -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9698) 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9699) 	if (i > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9700) 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9701) 		ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9704) 	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9705) 		ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9706) 				GFP_KERNEL, &dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9708) 		if (!ipr_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9709) 			ipr_free_cmd_blks(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9710) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9711) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9713) 		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9714) 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9716) 		ioarcb = &ipr_cmd->ioarcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9717) 		ipr_cmd->dma_addr = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9718) 		if (ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9719) 			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9720) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9721) 			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9722) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9723) 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9724) 		if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9725) 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9726) 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9727) 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9728) 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9729) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9730) 			ioarcb->write_ioadl_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9731) 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9732) 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9733) 			ioarcb->ioasa_host_pci_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9734) 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9735) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9736) 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9737) 		ipr_cmd->cmd_index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9738) 		ipr_cmd->ioa_cfg = ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9739) 		ipr_cmd->sense_buffer_dma = dma_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9740) 			offsetof(struct ipr_cmnd, sense_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9742) 		ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9743) 		ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9744) 		list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9745) 		if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9746) 			hrrq_id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9747) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9749) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9750) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9752) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9753)  * ipr_alloc_mem - Allocate memory for an adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9754)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9755)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9756)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9757)  * 	0 on success / non-zero for error
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9758)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9759) static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9761) 	struct pci_dev *pdev = ioa_cfg->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9762) 	int i, rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9764) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9765) 	ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9766) 				       sizeof(struct ipr_resource_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9767) 				       GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9769) 	if (!ioa_cfg->res_entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9770) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9772) 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9773) 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9774) 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9777) 	ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9778) 					      sizeof(struct ipr_misc_cbs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9779) 					      &ioa_cfg->vpd_cbs_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9780) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9782) 	if (!ioa_cfg->vpd_cbs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9783) 		goto out_free_res_entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9785) 	if (ipr_alloc_cmd_blks(ioa_cfg))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9786) 		goto out_free_vpd_cbs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9788) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9789) 		ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9790) 					sizeof(u32) * ioa_cfg->hrrq[i].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9791) 					&ioa_cfg->hrrq[i].host_rrq_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9792) 					GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9794) 		if (!ioa_cfg->hrrq[i].host_rrq)  {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9795) 			while (--i > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9796) 				dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9797) 					sizeof(u32) * ioa_cfg->hrrq[i].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9798) 					ioa_cfg->hrrq[i].host_rrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9799) 					ioa_cfg->hrrq[i].host_rrq_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9800) 			goto out_ipr_free_cmd_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9801) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9802) 		ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9803) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9805) 	ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9806) 						  ioa_cfg->cfg_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9807) 						  &ioa_cfg->cfg_table_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9808) 						  GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9809) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9810) 	if (!ioa_cfg->u.cfg_table)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9811) 		goto out_free_host_rrq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9813) 	for (i = 0; i < IPR_MAX_HCAMS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9814) 		ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9815) 							 sizeof(struct ipr_hostrcb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9816) 							 &ioa_cfg->hostrcb_dma[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9817) 							 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9819) 		if (!ioa_cfg->hostrcb[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9820) 			goto out_free_hostrcb_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9822) 		ioa_cfg->hostrcb[i]->hostrcb_dma =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9823) 			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9824) 		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9825) 		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9826) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9828) 	ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9829) 				 sizeof(struct ipr_trace_entry),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9830) 				 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9832) 	if (!ioa_cfg->trace)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9833) 		goto out_free_hostrcb_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9835) 	rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9836) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9837) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9838) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9840) out_free_hostrcb_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9841) 	while (i-- > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9842) 		dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9843) 				  ioa_cfg->hostrcb[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9844) 				  ioa_cfg->hostrcb_dma[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9845) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9846) 	dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9847) 			  ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9848) out_free_host_rrq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9849) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9850) 		dma_free_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9851) 				  sizeof(u32) * ioa_cfg->hrrq[i].size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9852) 				  ioa_cfg->hrrq[i].host_rrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9853) 				  ioa_cfg->hrrq[i].host_rrq_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9854) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9855) out_ipr_free_cmd_blocks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9856) 	ipr_free_cmd_blks(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9857) out_free_vpd_cbs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9858) 	dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9859) 			  ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9860) out_free_res_entries:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9861) 	kfree(ioa_cfg->res_entries);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9862) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9865) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9866)  * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9867)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9868)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9869)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9870)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9871)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9872) static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9874) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9876) 	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9877) 		ioa_cfg->bus_attr[i].bus = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9878) 		ioa_cfg->bus_attr[i].qas_enabled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9879) 		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9880) 		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9881) 			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9882) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9883) 			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9887) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9888)  * ipr_init_regs - Initialize IOA registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9889)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9890)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9891)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9892)  *	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9893)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9894) static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9896) 	const struct ipr_interrupt_offsets *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9897) 	struct ipr_interrupts *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9898) 	void __iomem *base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9900) 	p = &ioa_cfg->chip_cfg->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9901) 	t = &ioa_cfg->regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9902) 	base = ioa_cfg->hdw_dma_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9904) 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9905) 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9906) 	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9907) 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9908) 	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9909) 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9910) 	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9911) 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9912) 	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9913) 	t->ioarrin_reg = base + p->ioarrin_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9914) 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9915) 	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9916) 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9917) 	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9918) 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9919) 	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9921) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9922) 		t->init_feedback_reg = base + p->init_feedback_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9923) 		t->dump_addr_reg = base + p->dump_addr_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9924) 		t->dump_data_reg = base + p->dump_data_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9925) 		t->endian_swap_reg = base + p->endian_swap_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9926) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9927) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9929) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9930)  * ipr_init_ioa_cfg - Initialize IOA config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9931)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9932)  * @host:		scsi host struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9933)  * @pdev:		PCI dev struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9934)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9935)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9936)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9937)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9938) static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9939) 			     struct Scsi_Host *host, struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9941) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9943) 	ioa_cfg->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9944) 	ioa_cfg->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9945) 	ioa_cfg->log_level = ipr_log_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9946) 	ioa_cfg->doorbell = IPR_DOORBELL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9947) 	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9948) 	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9949) 	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9950) 	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9951) 	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9952) 	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9954) 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9955) 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9956) 	INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9957) 	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9958) 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9959) 	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9960) 	INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9961) 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9962) 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9963) 	init_waitqueue_head(&ioa_cfg->eeh_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9964) 	ioa_cfg->sdt_state = INACTIVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9966) 	ipr_initialize_bus_attr(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9967) 	ioa_cfg->max_devs_supported = ipr_max_devs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9969) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9970) 		host->max_channel = IPR_MAX_SIS64_BUSES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9971) 		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9972) 		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9973) 		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9974) 			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9975) 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9976) 					   + ((sizeof(struct ipr_config_table_entry64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9977) 					       * ioa_cfg->max_devs_supported)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9978) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9979) 		host->max_channel = IPR_VSET_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9980) 		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9981) 		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9982) 		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9983) 			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9984) 		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9985) 					   + ((sizeof(struct ipr_config_table_entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9986) 					       * ioa_cfg->max_devs_supported)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9987) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9989) 	host->unique_id = host->host_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9990) 	host->max_cmd_len = IPR_MAX_CDB_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9991) 	host->can_queue = ioa_cfg->max_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9992) 	pci_set_drvdata(pdev, ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9994) 	for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9995) 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9996) 		INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9997) 		spin_lock_init(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9998) 		if (i == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  9999) 			ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10000) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10001) 			ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10002) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10003) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10005) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10006)  * ipr_get_chip_info - Find adapter chip information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10007)  * @dev_id:		PCI device id struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10008)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10009)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10010)  * 	ptr to chip information on success / NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10011)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10012) static const struct ipr_chip_t *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10013) ipr_get_chip_info(const struct pci_device_id *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10015) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10017) 	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10018) 		if (ipr_chip[i].vendor == dev_id->vendor &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10019) 		    ipr_chip[i].device == dev_id->device)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10020) 			return &ipr_chip[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10021) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10024) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10025)  * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10026)  *						during probe time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10027)  * @ioa_cfg:	ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10028)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10029)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10030)  * 	None
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10031)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10032) static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10034) 	struct pci_dev *pdev = ioa_cfg->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10035) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10036) 	if (pci_channel_offline(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10037) 		wait_event_timeout(ioa_cfg->eeh_wait_q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10038) 				   !pci_channel_offline(pdev),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10039) 				   IPR_PCI_ERROR_RECOVERY_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10040) 		pci_restore_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10041) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10042) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10044) static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10045) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10046) 	int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10048) 	for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10049) 		snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10050) 			 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10051) 		ioa_cfg->vectors_info[vec_idx].
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10052) 			desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10053) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10056) static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10057) 		struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10058) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10059) 	int i, rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10061) 	for (i = 1; i < ioa_cfg->nvectors; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10062) 		rc = request_irq(pci_irq_vector(pdev, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10063) 			ipr_isr_mhrrq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10064) 			0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10065) 			ioa_cfg->vectors_info[i].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10066) 			&ioa_cfg->hrrq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10067) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10068) 			while (--i >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10069) 				free_irq(pci_irq_vector(pdev, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10070) 					&ioa_cfg->hrrq[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10071) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10072) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10074) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10075) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10077) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10078)  * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10079)  * @devp:		PCI device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10080)  * @irq:		IRQ number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10081)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10082)  * Description: Simply set the msi_received flag to 1 indicating that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10083)  * Message Signaled Interrupts are supported.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10084)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10085)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10086)  * 	0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10087)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10088) static irqreturn_t ipr_test_intr(int irq, void *devp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10089) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10090) 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10091) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10092) 	irqreturn_t rc = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10094) 	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10095) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10096) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10097) 	ioa_cfg->msi_received = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10098) 	wake_up(&ioa_cfg->msi_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10100) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10101) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10104) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10105)  * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10106)  * @ioa_cfg:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10107)  * @pdev:		PCI device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10108)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10109)  * Description: This routine sets up and initiates a test interrupt to determine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10110)  * if the interrupt is received via the ipr_test_intr() service routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10111)  * If the tests fails, the driver will fall back to LSI.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10112)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10113)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10114)  * 	0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10115)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10116) static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10117) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10118) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10119) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10120) 	int irq = pci_irq_vector(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10122) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10124) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10125) 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10126) 	ioa_cfg->msi_received = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10127) 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10128) 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10129) 	readl(ioa_cfg->regs.sense_interrupt_mask_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10130) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10132) 	rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10133) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10134) 		dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10135) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10136) 	} else if (ipr_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10137) 		dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10139) 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10140) 	readl(ioa_cfg->regs.sense_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10141) 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10142) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10143) 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10145) 	if (!ioa_cfg->msi_received) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10146) 		/* MSI test failed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10147) 		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10148) 		rc = -EOPNOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10149) 	} else if (ipr_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10150) 		dev_info(&pdev->dev, "MSI test succeeded.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10152) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10154) 	free_irq(irq, ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10156) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10158) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10161)  /* ipr_probe_ioa - Allocates memory and does first stage of initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10162)  * @pdev:		PCI device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10163)  * @dev_id:		PCI device id struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10164)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10165)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10166)  * 	0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10167)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10168) static int ipr_probe_ioa(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10169) 			 const struct pci_device_id *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10170) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10171) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10172) 	struct Scsi_Host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10173) 	unsigned long ipr_regs_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10174) 	void __iomem *ipr_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10175) 	int rc = PCIBIOS_SUCCESSFUL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10176) 	volatile u32 mask, uproc, interrupts;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10177) 	unsigned long lock_flags, driver_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10178) 	unsigned int irq_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10180) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10182) 	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10183) 	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10185) 	if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10186) 		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10187) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10188) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10189) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10191) 	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10192) 	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10193) 	ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10195) 	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10197) 	if (!ioa_cfg->ipr_chip) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10198) 		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10199) 			dev_id->vendor, dev_id->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10200) 		goto out_scsi_host_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10201) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10203) 	/* set SIS 32 or SIS 64 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10204) 	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10205) 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10206) 	ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10207) 	ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10209) 	if (ipr_transop_timeout)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10210) 		ioa_cfg->transop_timeout = ipr_transop_timeout;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10211) 	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10212) 		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10213) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10214) 		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10216) 	ioa_cfg->revid = pdev->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10218) 	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10220) 	ipr_regs_pci = pci_resource_start(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10222) 	rc = pci_request_regions(pdev, IPR_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10223) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10224) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10225) 			"Couldn't register memory range of registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10226) 		goto out_scsi_host_put;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10227) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10229) 	rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10231) 	if (rc || pci_channel_offline(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10232) 		if (pci_channel_offline(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10233) 			ipr_wait_for_pci_err_recovery(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10234) 			rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10235) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10237) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10238) 			dev_err(&pdev->dev, "Cannot enable adapter\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10239) 			ipr_wait_for_pci_err_recovery(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10240) 			goto out_release_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10241) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10244) 	ipr_regs = pci_ioremap_bar(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10246) 	if (!ipr_regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10247) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10248) 			"Couldn't map memory range of registers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10249) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10250) 		goto out_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10251) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10253) 	ioa_cfg->hdw_dma_regs = ipr_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10254) 	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10255) 	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10257) 	ipr_init_regs(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10259) 	if (ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10260) 		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10261) 		if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10262) 			dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10263) 			rc = dma_set_mask_and_coherent(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10264) 						       DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10265) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10266) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10267) 		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10269) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10270) 		dev_err(&pdev->dev, "Failed to set DMA mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10271) 		goto cleanup_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10272) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10273) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10274) 	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10275) 				   ioa_cfg->chip_cfg->cache_line_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10277) 	if (rc != PCIBIOS_SUCCESSFUL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10278) 		dev_err(&pdev->dev, "Write of cache line size failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10279) 		ipr_wait_for_pci_err_recovery(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10280) 		rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10281) 		goto cleanup_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10284) 	/* Issue MMIO read to ensure card is not in EEH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10285) 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10286) 	ipr_wait_for_pci_err_recovery(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10288) 	if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10289) 		dev_err(&pdev->dev, "The max number of MSIX is %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10290) 			IPR_MAX_MSIX_VECTORS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10291) 		ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10292) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10294) 	irq_flag = PCI_IRQ_LEGACY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10295) 	if (ioa_cfg->ipr_chip->has_msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10296) 		irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10297) 	rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10298) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10299) 		ipr_wait_for_pci_err_recovery(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10300) 		goto cleanup_nomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10301) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10302) 	ioa_cfg->nvectors = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10304) 	if (!pdev->msi_enabled && !pdev->msix_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10305) 		ioa_cfg->clear_isr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10307) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10309) 	if (pci_channel_offline(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10310) 		ipr_wait_for_pci_err_recovery(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10311) 		pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10312) 		if (pci_channel_offline(pdev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10313) 			rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10314) 			goto out_msi_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10315) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10318) 	if (pdev->msi_enabled || pdev->msix_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10319) 		rc = ipr_test_msi(ioa_cfg, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10320) 		switch (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10321) 		case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10322) 			dev_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10323) 				"Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10324) 				pdev->msix_enabled ? "-X" : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10325) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10326) 		case -EOPNOTSUPP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10327) 			ipr_wait_for_pci_err_recovery(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10328) 			pci_free_irq_vectors(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10330) 			ioa_cfg->nvectors = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10331) 			ioa_cfg->clear_isr = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10332) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10333) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10334) 			goto out_msi_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10335) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10336) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10338) 	ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10339) 				(unsigned int)num_online_cpus(),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10340) 				(unsigned int)IPR_MAX_HRRQ_NUM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10342) 	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10343) 		goto out_msi_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10345) 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10346) 		goto out_msi_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10348) 	rc = ipr_alloc_mem(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10349) 	if (rc < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10350) 		dev_err(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10351) 			"Couldn't allocate enough memory for device driver!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10352) 		goto out_msi_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10355) 	/* Save away PCI config space for use following IOA reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10356) 	rc = pci_save_state(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10358) 	if (rc != PCIBIOS_SUCCESSFUL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10359) 		dev_err(&pdev->dev, "Failed to save PCI config space\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10360) 		rc = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10361) 		goto cleanup_nolog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10362) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10364) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10365) 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10366) 	 * the card is in an unknown state and needs a hard reset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10367) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10368) 	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10369) 	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10370) 	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10371) 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10372) 		ioa_cfg->needs_hard_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10373) 	if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10374) 		ioa_cfg->needs_hard_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10375) 	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10376) 		ioa_cfg->ioa_unit_checked = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10378) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10379) 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10380) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10382) 	if (pdev->msi_enabled || pdev->msix_enabled) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10383) 		name_msi_vectors(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10384) 		rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10385) 			ioa_cfg->vectors_info[0].desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10386) 			&ioa_cfg->hrrq[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10387) 		if (!rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10388) 			rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10389) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10390) 		rc = request_irq(pdev->irq, ipr_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10391) 			 IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10392) 			 IPR_NAME, &ioa_cfg->hrrq[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10393) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10394) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10395) 		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10396) 			pdev->irq, rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10397) 		goto cleanup_nolog;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10398) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10400) 	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10401) 	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10402) 		ioa_cfg->needs_warm_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10403) 		ioa_cfg->reset = ipr_reset_slot_reset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10405) 		ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10406) 								WQ_MEM_RECLAIM, host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10407) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10408) 		if (!ioa_cfg->reset_work_q) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10409) 			dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10410) 			rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10411) 			goto out_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10412) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10413) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10414) 		ioa_cfg->reset = ipr_reset_start_bist;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10416) 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10417) 	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10418) 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10420) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10421) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10422) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10424) out_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10425) 	ipr_free_irqs(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10426) cleanup_nolog:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10427) 	ipr_free_mem(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10428) out_msi_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10429) 	ipr_wait_for_pci_err_recovery(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10430) 	pci_free_irq_vectors(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10431) cleanup_nomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10432) 	iounmap(ipr_regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10433) out_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10434) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10435) out_release_regions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10436) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10437) out_scsi_host_put:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10438) 	scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10439) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10440) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10442) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10443)  * ipr_initiate_ioa_bringdown - Bring down an adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10444)  * @ioa_cfg:		ioa config struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10445)  * @shutdown_type:	shutdown type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10446)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10447)  * Description: This function will initiate bringing down the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10448)  * This consists of issuing an IOA shutdown to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10449)  * to flush the cache, and running BIST.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10450)  * If the caller needs to wait on the completion of the reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10451)  * the caller must sleep on the reset_wait_q.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10452)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10453)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10454)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10455)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10456) static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10457) 				       enum ipr_shutdown_type shutdown_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10459) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10460) 	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10461) 		ioa_cfg->sdt_state = ABORT_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10462) 	ioa_cfg->reset_retries = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10463) 	ioa_cfg->in_ioa_bringdown = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10464) 	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10465) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10468) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10469)  * __ipr_remove - Remove a single adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10470)  * @pdev:	pci device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10471)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10472)  * Adapter hot plug remove entry point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10473)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10474)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10475)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10476)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10477) static void __ipr_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10478) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10479) 	unsigned long host_lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10480) 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10481) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10482) 	unsigned long driver_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10483) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10485) 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10486) 	while (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10487) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10488) 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10489) 		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10490) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10492) 	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10493) 		spin_lock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10494) 		ioa_cfg->hrrq[i].removing_ioa = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10495) 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10496) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10497) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10498) 	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10500) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10501) 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10502) 	flush_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10503) 	if (ioa_cfg->reset_work_q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10504) 		flush_workqueue(ioa_cfg->reset_work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10505) 	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10506) 	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10508) 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10509) 	list_del(&ioa_cfg->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10510) 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10512) 	if (ioa_cfg->sdt_state == ABORT_DUMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10513) 		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10514) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10516) 	ipr_free_all_resources(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10518) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10519) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10521) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10522)  * ipr_remove - IOA hot plug remove entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10523)  * @pdev:	pci device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10524)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10525)  * Adapter hot plug remove entry point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10526)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10527)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10528)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10529)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10530) static void ipr_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10532) 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10534) 	ENTER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10536) 	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10537) 			      &ipr_trace_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10538) 	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10539) 			     &ipr_dump_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10540) 	sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10541) 			&ipr_ioa_async_err_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10542) 	scsi_remove_host(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10544) 	__ipr_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10546) 	LEAVE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10547) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10549) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10550)  * ipr_probe - Adapter hot plug add entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10551)  * @pdev:	pci device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10552)  * @dev_id:	pci device ID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10553)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10554)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10555)  * 	0 on success / non-zero on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10556)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10557) static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10558) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10559) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10560) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10561) 	int rc, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10563) 	rc = ipr_probe_ioa(pdev, dev_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10565) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10566) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10568) 	ioa_cfg = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10569) 	rc = ipr_probe_ioa_part2(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10571) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10572) 		__ipr_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10573) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10574) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10576) 	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10578) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10579) 		__ipr_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10580) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10581) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10583) 	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10584) 				   &ipr_trace_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10586) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10587) 		scsi_remove_host(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10588) 		__ipr_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10589) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10592) 	rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10593) 			&ipr_ioa_async_err_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10595) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10596) 		ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10597) 				&ipr_dump_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10598) 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10599) 				&ipr_trace_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10600) 		scsi_remove_host(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10601) 		__ipr_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10602) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10603) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10605) 	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10606) 				   &ipr_dump_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10608) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10609) 		sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10610) 				      &ipr_ioa_async_err_log);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10611) 		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10612) 				      &ipr_trace_attr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10613) 		scsi_remove_host(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10614) 		__ipr_remove(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10615) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10616) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10617) 	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10618) 	ioa_cfg->scan_enabled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10619) 	schedule_work(&ioa_cfg->work_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10620) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10622) 	ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10624) 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10625) 		for (i = 1; i < ioa_cfg->hrrq_num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10626) 			irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10627) 					ioa_cfg->iopoll_weight, ipr_iopoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10628) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10629) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10631) 	scsi_scan_host(ioa_cfg->host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10633) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10636) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10637)  * ipr_shutdown - Shutdown handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10638)  * @pdev:	pci device struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10639)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10640)  * This function is invoked upon system shutdown/reboot. It will issue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10641)  * an adapter shutdown to the adapter to flush the write cache.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10642)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10643)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10644)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10645)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10646) static void ipr_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10648) 	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10649) 	unsigned long lock_flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10650) 	enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10651) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10653) 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10654) 	if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10655) 		ioa_cfg->iopoll_weight = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10656) 		for (i = 1; i < ioa_cfg->hrrq_num; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10657) 			irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10658) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10660) 	while (ioa_cfg->in_reset_reload) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10661) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10662) 		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10663) 		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10664) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10666) 	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10667) 		shutdown_type = IPR_SHUTDOWN_QUIESCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10669) 	ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10670) 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10671) 	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10672) 	if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10673) 		ipr_free_irqs(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10674) 		pci_disable_device(ioa_cfg->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10676) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10678) static struct pci_device_id ipr_pci_table[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10679) 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10680) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10681) 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10682) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10683) 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10684) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10685) 	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10686) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10687) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10688) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10689) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10690) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10691) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10692) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10693) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10694) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10695) 		IPR_USE_LONG_TRANSOP_TIMEOUT },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10696) 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10697) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10698) 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10699) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10700) 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10701) 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10702) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10703) 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10704) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10705) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10706) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10707) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10708) 	      IPR_USE_LONG_TRANSOP_TIMEOUT},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10709) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10710) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10711) 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10712) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10713) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10714) 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10715) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10716) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10717) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10718) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10719) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10720) 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10721) 	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10722) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10723) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10724) 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10725) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10726) 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10727) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10728) 		IPR_USE_LONG_TRANSOP_TIMEOUT },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10729) 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10730) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10731) 		IPR_USE_LONG_TRANSOP_TIMEOUT },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10732) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10733) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10734) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10735) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10736) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10737) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10738) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10739) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10740) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10741) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10742) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10743) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10744) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10745) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10746) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10747) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10748) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10749) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10750) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10751) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10752) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10753) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10754) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10755) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10756) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10757) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10758) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10759) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10760) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10761) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10762) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10763) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10764) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10765) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10766) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10767) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10768) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10769) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10770) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10771) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10772) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10773) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10774) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10775) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10776) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10777) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10778) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10779) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10780) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10781) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10782) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10783) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10784) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10785) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10786) 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10787) 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10788) 	{ }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10789) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10790) MODULE_DEVICE_TABLE(pci, ipr_pci_table);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10792) static const struct pci_error_handlers ipr_err_handler = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10793) 	.error_detected = ipr_pci_error_detected,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10794) 	.mmio_enabled = ipr_pci_mmio_enabled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10795) 	.slot_reset = ipr_pci_slot_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10796) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10798) static struct pci_driver ipr_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10799) 	.name = IPR_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10800) 	.id_table = ipr_pci_table,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10801) 	.probe = ipr_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10802) 	.remove = ipr_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10803) 	.shutdown = ipr_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10804) 	.err_handler = &ipr_err_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10805) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10807) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10808)  * ipr_halt_done - Shutdown prepare completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10809)  * @ipr_cmd:   ipr command struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10810)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10811)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10812)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10813)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10814) static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10816) 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10819) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10820)  * ipr_halt - Issue shutdown prepare to all adapters
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10821)  * @nb: Notifier block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10822)  * @event: Notifier event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10823)  * @buf: Notifier data (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10824)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10825)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10826)  * 	NOTIFY_OK on success / NOTIFY_DONE on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10827)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10828) static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10830) 	struct ipr_cmnd *ipr_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10831) 	struct ipr_ioa_cfg *ioa_cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10832) 	unsigned long flags = 0, driver_lock_flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10834) 	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10835) 		return NOTIFY_DONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10837) 	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10839) 	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10840) 		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10841) 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10842) 		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10843) 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10844) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10845) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10847) 		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10848) 		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10849) 		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10850) 		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10851) 		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10853) 		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10854) 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10856) 	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10858) 	return NOTIFY_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10861) static struct notifier_block ipr_notifier = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10862) 	ipr_halt, NULL, 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10863) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10865) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10866)  * ipr_init - Module entry point
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10867)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10868)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10869)  * 	0 on success / negative value on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10870)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10871) static int __init ipr_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10873) 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10874) 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10876) 	register_reboot_notifier(&ipr_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10877) 	return pci_register_driver(&ipr_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10880) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10881)  * ipr_exit - Module unload
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10882)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10883)  * Module unload entry point.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10884)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10885)  * Return value:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10886)  * 	none
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10887)  **/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10888) static void __exit ipr_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10890) 	unregister_reboot_notifier(&ipr_notifier);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10891) 	pci_unregister_driver(&ipr_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10894) module_init(ipr_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10895) module_exit(ipr_exit);