Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  *	Adaptec AAC series RAID controller driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *	(c) Copyright 2001 Red Hat Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * based on the old aacraid driver that is..
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Adaptec aacraid device driver for Linux.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * Copyright (c) 2000-2010 Adaptec, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * Module Name:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  *  commsup.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * Abstract: Contains all routines that are required for FSA host/adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  *    communication.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/crash_dump.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/spinlock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/completion.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/blkdev.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/kthread.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/bcd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <scsi/scsi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <scsi/scsi_host.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <scsi/scsi_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <scsi/scsi_cmnd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include "aacraid.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42)  *	fib_map_alloc		-	allocate the fib objects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43)  *	@dev: Adapter to allocate for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45)  *	Allocate and map the shared PCI space for the FIB blocks used to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46)  *	talk to the Adaptec firmware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) static int fib_map_alloc(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) 	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) 		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) 		dev->max_cmd_size = dev->max_fib_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) 	if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 		dev->max_cmd_size = dev->max_fib_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 	dprintk((KERN_INFO
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 		&dev->hw_fib_pa, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 	if (dev->hw_fib_va == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) }
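
The dma_alloc_coherent() call above sizes one contiguous region: a slot of max_cmd_size plus one struct aac_fib_xporthdr per FIB, times the host queue depth plus the reserved management FIBs, with ALIGN32 - 1 bytes of slack so the base can later be rounded up to a 32-byte boundary. A minimal userspace sketch of the same arithmetic, using made-up stand-ins for the sizes (the real values come from aacraid.h and the negotiated adapter parameters):

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins; the driver gets these from aacraid.h / negotiation. */
#define ALIGN32          32
#define AAC_NUM_MGT_FIB  8
struct aac_fib_xporthdr_sketch { unsigned long long pad[8]; }; /* placeholder */

int main(void)
{
	size_t max_cmd_size = 2048;   /* hypothetical negotiated command size */
	int can_queue = 256;          /* hypothetical host queue depth */

	size_t per_slot = max_cmd_size + sizeof(struct aac_fib_xporthdr_sketch);
	size_t total = per_slot * (can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1);

	printf("per-slot %zu bytes, total coherent allocation %zu bytes\n",
	       per_slot, total);
	return 0;
}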
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75)  *	aac_fib_map_free		-	free the fib objects
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76)  *	@dev: Adapter to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  *	Free the PCI mappings and the memory allocated for FIB blocks
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  *	on this adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) void aac_fib_map_free(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	size_t alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 	size_t fib_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	int num_fibs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 	if(!dev->hw_fib_va || !dev->max_cmd_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) 	fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 	alloc_size = fib_size * num_fibs + ALIGN32 - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) 	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) 			  dev->hw_fib_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) 	dev->hw_fib_va = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) 	dev->hw_fib_pa = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) void aac_fib_vector_assign(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 	u32 i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) 	u32 vector = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) 	struct fib *fibptr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 	for (i = 0, fibptr = &dev->fibs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) 		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) 		i++, fibptr++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) 		if ((dev->max_msix == 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) 		  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 			- dev->vector_cap))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) 			fibptr->vector_no = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) 			fibptr->vector_no = vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) 			vector++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) 			if (vector == dev->max_msix)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) 				vector = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) }
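
aac_fib_vector_assign() spreads the FIBs over the available MSI-X vectors round-robin, starting at vector 1, while the tail of the array (the last vector_cap slots) and the single-vector case fall back to vector 0. A standalone sketch of that assignment pattern with illustrative, made-up sizes:

#include <stdio.h>

int main(void)
{
	int can_queue = 16, num_mgt_fib = 8, max_msix = 4, vector_cap = 8;
	int total = can_queue + num_mgt_fib;
	int vector = 1;

	for (int i = 0; i < total; i++) {
		int v;
		if (max_msix == 1 || i > (total - 1 - vector_cap)) {
			v = 0;               /* shared vector for the tail fibs */
		} else {
			v = vector++;        /* round-robin over 1..max_msix-1 */
			if (vector == max_msix)
				vector = 1;
		}
		printf("fib %2d -> vector %d\n", i, v);
	}
	return 0;
}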
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125)  *	aac_fib_setup	-	setup the fibs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126)  *	@dev: Adapter to set up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128)  *	Allocate the PCI space for the fibs, map it and then initialise the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129)  *	fib area, the unmapped fib data and also the free list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) int aac_fib_setup(struct aac_dev * dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) 	struct fib *fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) 	struct hw_fib *hw_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) 	dma_addr_t hw_fib_pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) 	u32 max_cmds;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) 	while (((i = fib_map_alloc(dev)) == -ENOMEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) 	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 		max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) 		dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) 		if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) 			dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) 	if (i<0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) 	memset(dev->hw_fib_va, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) 		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) 		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) 	/* 32 byte alignment for PMC */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) 	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) 	hw_fib    = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) 					(hw_fib_pa - dev->hw_fib_pa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 	/* add Xport header */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) 	hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) 		sizeof(struct aac_fib_xporthdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) 	hw_fib_pa += sizeof(struct aac_fib_xporthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) 	 *	Initialise the fibs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) 	for (i = 0, fibptr = &dev->fibs[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) 		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) 		i++, fibptr++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 		fibptr->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) 		fibptr->size = sizeof(struct fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 		fibptr->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) 		fibptr->hw_fib_va = hw_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) 		fibptr->data = (void *) fibptr->hw_fib_va->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) 		fibptr->next = fibptr+1;	/* Forward chain the fibs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) 		init_completion(&fibptr->event_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) 		spin_lock_init(&fibptr->event_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) 		hw_fib->header.SenderSize =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) 			cpu_to_le16(dev->max_fib_size);	/* ?? max_cmd_size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 		fibptr->hw_fib_pa = hw_fib_pa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) 		fibptr->hw_sgl_pa = hw_fib_pa +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) 			offsetof(struct aac_hba_cmd_req, sge[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 		 * one element is for the ptr to the separate sg list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 		 * second element for 32 byte alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 		fibptr->hw_error_pa = hw_fib_pa +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) 			offsetof(struct aac_native_hba, resp.resp_bytes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) 			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 		hw_fib_pa = hw_fib_pa +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) 			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	 *	Assign vector numbers to fibs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	aac_fib_vector_assign(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 	 *	Add the fib chain to the free list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	*	Set 8 fibs aside for management tools
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) }
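
The "32 byte alignment for PMC" step above rounds the coherent buffer's DMA address up to the next 32-byte boundary with (pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1) and advances the virtual pointer by the same offset, which is why the extra ALIGN32 - 1 bytes were allocated. A small sketch of the rounding arithmetic (the address value is arbitrary):

#include <stdio.h>
#include <stdint.h>

#define ALIGN32 32

/* Round an address up to the next 32-byte boundary. */
static uint64_t align32_up(uint64_t pa)
{
	return (pa + (ALIGN32 - 1)) & ~(uint64_t)(ALIGN32 - 1);
}

int main(void)
{
	uint64_t pa = 0x1000000dULL;  /* arbitrary example address */
	uint64_t aligned = align32_up(pa);

	/* The virtual pointer is advanced by the same offset (aligned - pa). */
	printf("pa=%#llx aligned=%#llx offset=%llu\n",
	       (unsigned long long)pa, (unsigned long long)aligned,
	       (unsigned long long)(aligned - pa));
	return 0;
}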
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215)  *	aac_fib_alloc_tag - allocate a fib using tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216)  *	@dev: Adapter to allocate the fib for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217)  *	@scmd: SCSI command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219)  *	Allocate a fib from the adapter fib pool using tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220)  *	from the blk layer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	struct fib *fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	fibptr = &dev->fibs[scmd->request->tag];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	 *	Null out fields that depend on being zero at the start of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 	 *	each I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	fibptr->hw_fib_va->header.XferState = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) 	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 	fibptr->callback_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) 	fibptr->callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	fibptr->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	return fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) }
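
Because each SCSI command's block-layer tag is unique while the command is outstanding, aac_fib_alloc_tag() can index straight into dev->fibs with it: no free list and no lock are needed. A toy illustration of the tag-indexed lookup (the pool size and tag value are made up):

#include <stdio.h>

/* Toy pool indexed directly by a block-layer tag, as in aac_fib_alloc_tag(). */
struct toy_fib { int in_use; };

int main(void)
{
	struct toy_fib fibs[32] = {0};
	int tag = 5;                     /* hypothetical scmd->request->tag */

	struct toy_fib *f = &fibs[tag];  /* O(1) lookup, no free-list walk */
	f->in_use = 1;
	printf("tag %d -> fib slot %td\n", tag, f - fibs);
	return 0;
}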
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242)  *	aac_fib_alloc	-	allocate a fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243)  *	@dev: Adapter to allocate the fib for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245)  *	Allocate a fib from the adapter fib pool. If the pool is empty we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246)  *	return NULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249) struct fib *aac_fib_alloc(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251) 	struct fib * fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253) 	spin_lock_irqsave(&dev->fib_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) 	fibptr = dev->free_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) 	if(!fibptr){
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) 		spin_unlock_irqrestore(&dev->fib_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 		return fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 	dev->free_fib = fibptr->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 	spin_unlock_irqrestore(&dev->fib_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 	 *	Set the proper node type code and node byte size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 	fibptr->size = sizeof(struct fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 	 *	Null out fields that depend on being zero at the start of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 	 *	each I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	fibptr->hw_fib_va->header.XferState = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	fibptr->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	fibptr->callback = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	fibptr->callback_data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	return fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279)  *	aac_fib_free	-	free a fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280)  *	@fibptr: fib to free up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282)  *	Frees up a fib and places it on the appropriate queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) void aac_fib_free(struct fib *fibptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 	if (fibptr->done == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 		aac_config.fib_timeouts++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 		fibptr->hw_fib_va->header.XferState != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 			 (void*)fibptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 	fibptr->next = fibptr->dev->free_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	fibptr->dev->free_fib = fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) }
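
aac_fib_alloc() and aac_fib_free() above manage the pool as a singly linked LIFO free list: alloc pops from dev->free_fib, free pushes back at the head, both under dev->fib_lock. A userspace sketch of just the list mechanics, without the locking or the XferState bookkeeping:

#include <stdio.h>
#include <stddef.h>

/* Toy stand-in for struct fib: only the free-list link matters here. */
struct toy_fib {
	int id;
	struct toy_fib *next;
};

static struct toy_fib *free_list;

static struct toy_fib *toy_fib_alloc(void)
{
	struct toy_fib *f = free_list;       /* pop from the head */
	if (f)
		free_list = f->next;
	return f;                            /* NULL means the pool is empty */
}

static void toy_fib_free(struct toy_fib *f)
{
	f->next = free_list;                 /* push back at the head (LIFO) */
	free_list = f;
}

int main(void)
{
	struct toy_fib pool[4];
	for (int i = 3; i >= 0; i--) {
		pool[i].id = i;
		toy_fib_free(&pool[i]);
	}
	struct toy_fib *a = toy_fib_alloc();
	struct toy_fib *b = toy_fib_alloc();
	printf("allocated %d then %d\n", a->id, b->id);
	toy_fib_free(a);
	toy_fib_free(b);
	return 0;
}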
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307)  *	aac_fib_init	-	initialise a fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308)  *	@fibptr: The fib to initialize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310)  *	Set up the generic fib fields ready for use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) void aac_fib_init(struct fib *fibptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	hw_fib->header.StructType = FIB_MAGIC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326)  *	fib_dealloc		-	deallocate a fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327)  *	@fibptr: fib to deallocate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329)  *	Will deallocate and return to the free pool the FIB pointed to by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330)  *	caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) static void fib_dealloc(struct fib * fibptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	struct hw_fib *hw_fib = fibptr->hw_fib_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	hw_fib->header.XferState = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340)  *	Communication primitives define and support the queuing method we use to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341)  *	support host to adapter communication. All queue accesses happen through
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342)  *	these routines and they are the only routines which have knowledge of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343)  *	how these queues are implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347)  *	aac_get_entry		-	get a queue entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348)  *	@dev: Adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349)  *	@qid: Queue Number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350)  *	@entry: Entry return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351)  *	@index: Index return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352)  *	@nonotify: notification control
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354)  *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355)  *	is full (no free entries) then no entry is returned and the function returns 0; otherwise 1 is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356)  *	returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 	struct aac_queue * q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 	unsigned long idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 	 *	All of the queues wrap when they reach the end, so we check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 	 *	to see if they have reached the end and if they have we just
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	 *	set the index back to zero. This is a wrap. You could or off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	 *	the high bits in all updates but this is a bit faster I think.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	q = &dev->queues->queue[qid];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	idx = *index = le32_to_cpu(*(q->headers.producer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 	/* Interrupt Moderation, only interrupt for first two entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	if (idx != le32_to_cpu(*(q->headers.consumer))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 		if (--idx == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 			if (qid == AdapNormCmdQueue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 				idx = ADAP_NORM_CMD_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 				idx = ADAP_NORM_RESP_ENTRIES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) 		if (idx != le32_to_cpu(*(q->headers.consumer)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 			*nonotify = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	if (qid == AdapNormCmdQueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 		if (*index >= ADAP_NORM_CMD_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 			*index = 0; /* Wrap to front of the Producer Queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		if (*index >= ADAP_NORM_RESP_ENTRIES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 			*index = 0; /* Wrap to front of the Producer Queue. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 	/* Queue is full */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 				qid, atomic_read(&q->numpending));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 		*entry = q->base + *index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) }
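
The producer index handling above is a classic ring buffer: the index wraps to zero at the queue size, and the queue is treated as full when the slot after the producer is the consumer, so one entry is always left unused. A compact sketch in the spirit of aac_get_entry(), with a made-up queue size:

#include <stdio.h>

#define ENTRIES 8   /* stand-in for ADAP_NORM_CMD_ENTRIES */

/*
 * Wrap the producer index at the queue size and treat the queue as full
 * when advancing it would land on the consumer index.
 */
static int get_entry(unsigned int producer, unsigned int consumer,
		     unsigned int *index)
{
	if (producer >= ENTRIES)
		producer = 0;                  /* wrap to the front */
	if (producer + 1 == consumer)
		return 0;                      /* queue full */
	*index = producer;
	return 1;
}

int main(void)
{
	unsigned int idx;
	printf("room available: %d\n", get_entry(3, 0, &idx));  /* 1, idx=3 */
	printf("full queue:     %d\n", get_entry(4, 5, &idx));  /* 0 */
	return 0;
}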
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406)  *	aac_queue_get		-	get the next free QE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407)  *	@dev: Adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408)  *	@index: Returned index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409)  *	@qid: Queue number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410)  *	@hw_fib: Fib to associate with the queue entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411)  *	@wait: Wait if queue full
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412)  *	@fibptr: Driver fib object to go with fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413)  *	@nonotify: Don't notify the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415)  *	Gets the next free QE off the requested priority adapter command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416)  *	queue and associates the Fib with the QE. The QE represented by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417)  *	index is ready to insert on the queue when this routine returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418)  *	success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	struct aac_entry * entry = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	int map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	if (qid == AdapNormCmdQueue) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 		/*  if no entries wait for some if caller wants to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 			printk(KERN_ERR "GetEntries failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 		 *	Setup queue entry with a command, status and fib mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 		map = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 			/* if no entries wait for some if caller wants to */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		 *	Setup queue entry with command, status and fib mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 		entry->addr = hw_fib->header.SenderFibAddress;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 			/* Restore the adapter's pointer to the FIB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;  /* Let the adapter know where to find its data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 		map = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	 *	If MapFib is true then we need to map the Fib and put pointers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	 *	in the queue entry.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	if (map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459)  *	Define the highest level of host to adapter communication routines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460)  *	These routines will support host to adapter FS communication. These
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461)  *	routines have no knowledge of the communication method used. This level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462)  *	sends and receives FIBs. This level has no knowledge of how these FIBs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463)  *	get passed back and forth.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467)  *	aac_fib_send	-	send a fib to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468)  *	@command: Command to send
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469)  *	@fibptr: The fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470)  *	@size: Size of fib data area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471)  *	@priority: Priority of Fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472)  *	@wait: Async/sync select
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473)  *	@reply: True if a reply is wanted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474)  *	@callback: Called with reply
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475)  *	@callback_data: Passed to callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477)  *	Sends the requested FIB to the adapter and optionally will wait for a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478)  *	response FIB. If the caller does not wish to wait for a response then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479)  *	an event to wait on must be supplied. This event will be set when a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480)  *	response FIB is received from the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 		int priority, int wait, int reply, fib_callback callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 		void *callback_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	struct aac_dev * dev = fibptr->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 	unsigned long mflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	unsigned long sflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 	if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	 *	There are 5 cases with the wait and response requested flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	 *	The only invalid cases are if the caller requests to wait and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	 *	does not request a response and if the caller does not want a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	 *	response and the Fib is not allocated from pool. If a response
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 	 *	is not requested the Fib will just be deallocated by the DPC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	 *	routine when the response comes back from the adapter. No
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	 *	further processing will be done besides deleting the Fib. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	 *	will have a debug mode where the adapter can notify the host
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 	 *	it had a problem and the host can log that fact.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	fibptr->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 	if (wait && !reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	} else if (!wait && reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 	} else if (!wait && !reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	} else if (wait && reply) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 	 *	Map the fib into 32bits by using the fib number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 	hw_fib->header.SenderFibAddress =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 		cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 	/* use the same shifted value for handle to be compatible
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 	 * with the new native hba command handle
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	hw_fib->header.Handle =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 		cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	 *	Set FIB state to indicate where it came from and if we want a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	 *	response from the adapter. Also load the command from the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	 *	caller.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	 *	Map the hw fib pointer as a 32bit value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	hw_fib->header.Command = cpu_to_le16(command);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	 *	Set the size of the Fib we want to send to the adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 		return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	 *	Get a queue entry, connect the FIB to it and send a notify so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	 *	the adapter knows a command is ready.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	 *	Fill in the Callback and CallbackContext if we are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	 *	going to wait.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	if (!wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		fibptr->callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 		fibptr->callback_data = callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 		fibptr->flags = FIB_CONTEXT_FLAG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 	fibptr->done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 	dprintk((KERN_DEBUG "Fib contents:.\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	if (!dev->queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		spin_lock_irqsave(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 			printk(KERN_INFO "No management Fibs Available:%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 						dev->management_fib_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 			spin_unlock_irqrestore(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 		dev->management_fib_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 		spin_unlock_irqrestore(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		spin_lock_irqsave(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	if (dev->sync_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 		if (wait)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 		spin_lock_irqsave(&dev->sync_lock, sflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 		if (dev->sync_fib) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 			spin_unlock_irqrestore(&dev->sync_lock, sflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 			dev->sync_fib = fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 			spin_unlock_irqrestore(&dev->sync_lock, sflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 				NULL, NULL, NULL, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 		if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 			if (wait_for_completion_interruptible(&fibptr->event_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 				return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	if (aac_adapter_deliver(fibptr) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 		if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 			spin_lock_irqsave(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			dev->management_fib_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 			spin_unlock_irqrestore(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	 *	If the caller wanted us to wait for response wait now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 		/* Only set for first known interruptible command */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 		if (wait < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 			 * *VERY* Dangerous to time out a command, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 			 * assumption is made that we have no hope of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 			 * functioning because an interrupt routing or other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 			 * hardware failure has occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 			while (!try_wait_for_completion(&fibptr->event_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 				int blink;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 				if (time_is_before_eq_jiffies(timeout)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 					atomic_dec(&q->numpending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 					if (wait == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	        				printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 						  "Usually a result of a PCI interrupt routing problem;\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 						  "update mother board BIOS or consider utilizing one of\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 						  "the SAFE mode kernel options (acpi, apic etc)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 					return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 				if (unlikely(aac_pci_offline(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 					return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 				if ((blink = aac_adapter_check_health(dev)) > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 					if (wait == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 						  "Usually a result of a serious unrecoverable hardware problem\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 						  blink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 					return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 				 * Allow other processes / CPUS to use core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 				schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 		} else if (wait_for_completion_interruptible(&fibptr->event_wait)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 			/* Do nothing ... satisfy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 			 * wait_for_completion_interruptible must_check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 		spin_lock_irqsave(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) 		if (fibptr->done == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) 			fibptr->done = 2; /* Tell interrupt we aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 			return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 		BUG_ON(fibptr->done == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 		if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 			return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	 *	If the user does not want a response then return success; otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	 *	return pending
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	if (reply)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) }
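
aac_fib_send() identifies the FIB to the adapter by its index in dev->fibs rather than by a pointer: SenderFibAddress is the index shifted left by two, and Handle is that value plus one so it also fits the native HBA request_id format, where bit 1 must stay zero. A sketch of the encode/decode arithmetic with a made-up index:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t index = 37;                       /* hypothetical fib number */
	uint32_t sender = index << 2;              /* SenderFibAddress value  */
	uint32_t handle = (index << 2) + 1;        /* Handle / request_id     */

	/* bit 1 of the handle stays 0; the fib index is recovered with >> 2 */
	printf("sender=%u handle=%u bit1=%u decoded index=%u\n",
	       sender, handle, (handle >> 1) & 1, handle >> 2);
	return 0;
}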
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		void *callback_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 	struct aac_dev *dev = fibptr->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	int wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 	unsigned long flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	unsigned long mflags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			fibptr->hw_fib_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	if (callback) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 		wait = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		fibptr->callback = callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 		fibptr->callback_data = callback_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 		wait = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	hbacmd->iu_type = command;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		/* bit1 of request_id must be 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 		hbacmd->request_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		spin_lock_irqsave(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			spin_unlock_irqrestore(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		dev->management_fib_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		spin_unlock_irqrestore(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		spin_lock_irqsave(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	if (aac_adapter_deliver(fibptr) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 			spin_lock_irqsave(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 			dev->management_fib_count--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 			spin_unlock_irqrestore(&dev->manage_lock, mflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	FIB_COUNTER_INCREMENT(aac_config.NativeSent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	if (wait) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		if (unlikely(aac_pci_offline(dev)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 		if (wait_for_completion_interruptible(&fibptr->event_wait))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			fibptr->done = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		spin_lock_irqsave(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		if ((fibptr->done == 0) || (fibptr->done == 2)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 			fibptr->done = 2; /* Tell interrupt we aborted */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			return -ERESTARTSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		spin_unlock_irqrestore(&fibptr->event_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 		WARN_ON(fibptr->done == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			return -ETIMEDOUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	return -EINPROGRESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790)  *	aac_consumer_get	-	get the top of the queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791)  *	@dev: Adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792)  *	@q: Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793)  *	@entry: Return entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795)  *	Returns nonzero if there is an entry at the top of the queue we are a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  *	consumer of, and stores the address of that queue entry in @entry. It
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  *	does not change the state of the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		status = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		 *	The consumer index must be wrapped if we have reached
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		 *	the end of the queue, else we just use the entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 		 *	pointed to by the header index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 			index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			index = le32_to_cpu(*q->headers.consumer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		*entry = q->base + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		status = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  *	aac_consumer_free	-	free consumer entry
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  *	@dev: Adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  *	@q: Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  *	@qid: Queue ident
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  *	Frees up the current top of the queue we are a consumer of. If the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  *	queue was full notify the producer that the queue is no longer full.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	int wasfull = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	u32 notify;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		wasfull = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		*q->headers.consumer = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		le32_add_cpu(q->headers.consumer, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	if (wasfull) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		switch (qid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		case HostNormCmdQueue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			notify = HostNormCmdNotFull;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		case HostNormRespQueue:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 			notify = HostNormRespNotFull;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		aac_adapter_notify(dev, notify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) }
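
/*
 * Editorial sketch (assumed usage, not original driver code): a consumer of
 * one of the host queues would typically pair the two helpers above, e.g.
 *
 *	struct aac_entry *entry;
 *
 *	while (aac_consumer_get(dev, q, &entry)) {
 *		... handle *entry ...
 *		aac_consumer_free(dev, q, HostNormCmdQueue);
 *	}
 *
 * aac_consumer_get() only peeks at the head entry; aac_consumer_free()
 * advances the consumer index and, if the queue had been full, notifies the
 * adapter that space is available again.
 */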
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  *	aac_fib_adapter_complete	-	complete adapter issued fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  *	@fibptr: fib to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  *	@size: size of fib
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  *	Will do all necessary work to complete a FIB that was sent from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  *	the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	struct aac_dev * dev = fibptr->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	struct aac_queue * q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	unsigned long nointr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	unsigned long qflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		kfree(hw_fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	if (hw_fib->header.XferState == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		if (dev->comm_interface == AAC_COMM_MESSAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			kfree(hw_fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	 *	If we plan to do anything check the structure type first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if (hw_fib->header.StructType != FIB_MAGIC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	    hw_fib->header.StructType != FIB_MAGIC2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	    hw_fib->header.StructType != FIB_MAGIC2_64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		if (dev->comm_interface == AAC_COMM_MESSAGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			kfree(hw_fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	 *	This block handles the case where the adapter had sent us a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 *	command and we have finished processing the command. We
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	 *	call completeFib when we are done processing the command
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	 *	and want to send a response back to the adapter. This will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	 *	send the completed cdb to the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		if (dev->comm_interface == AAC_COMM_MESSAGE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 			kfree(hw_fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 			u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			if (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 				size += sizeof(struct aac_fibhdr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 				if (size > le16_to_cpu(hw_fib->header.SenderSize))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 					return -EMSGSIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 				hw_fib->header.Size = cpu_to_le16(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			q = &dev->queues->queue[AdapNormRespQueue];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			spin_lock_irqsave(q->lock, qflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 			*(q->headers.producer) = cpu_to_le32(index + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			spin_unlock_irqrestore(q->lock, qflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			if (!(nointr & (int)aac_config.irq_mod))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 				aac_adapter_notify(dev, AdapNormRespQueue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		printk(KERN_WARNING "aac_fib_adapter_complete: "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			"Unknown xferstate detected.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  *	aac_fib_complete	-	fib completion handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  *	@fibptr: FIB to complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  *	Will do all necessary work to complete a FIB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) int aac_fib_complete(struct fib *fibptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		fib_dealloc(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 *	Check for a fib which has already been completed or with a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 *	status wait timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	if (hw_fib->header.XferState == 0 || fibptr->done == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	 *	If we plan to do anything check the structure type first.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	if (hw_fib->header.StructType != FIB_MAGIC &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	    hw_fib->header.StructType != FIB_MAGIC2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	    hw_fib->header.StructType != FIB_MAGIC2_64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 *	This block completes a cdb which originated on the host and we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 *	just need to deallocate the cdb or reinit it. At this point the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 *	command we had sent to the adapter is complete and this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	 *	cdb could be reused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		fib_dealloc(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		 *	This handles the case when the host has aborted the I/O
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		 *	to the adapter because the adapter is not responding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		fib_dealloc(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		fib_dealloc(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  *	aac_printf	-	handle printf from firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  *	@dev: Adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  *	@val: Message info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  *	Print a message passed to us by the controller firmware on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  *	Adaptec board
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) void aac_printf(struct aac_dev *dev, u32 val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	char *cp = dev->printfbuf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	if (dev->printf_enabled)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		int length = val & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 		int level = (val >> 16) & 0xffff;
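		/*
		 * Editorial note (illustration only): @val packs the message
		 * length in its low 16 bits and the log level in its high 16
		 * bits, so e.g. val = 0x00010040 decodes to level 1 with a
		 * 0x40-byte message already placed in dev->printfbuf.
		 */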
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		 *	The size of the printfbuf is set in port.c
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		 *	There is no variable or define for it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		if (length > 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			length = 255;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		if (cp[length] != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			cp[length] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		if (level == LOG_AAC_HIGH_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			printk(KERN_WARNING "%s:%s", dev->name, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			printk(KERN_INFO "%s:%s", dev->name, cp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	memset(cp, 0, 256);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	switch (aac_aif_data(aifcmd, 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	case AifBuCacheDataLoss:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		if (aac_aif_data(aifcmd, 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			aac_aif_data(aifcmd, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	case AifBuCacheDataRecover:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		if (aac_aif_data(aifcmd, 2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			aac_aif_data(aifcmd, 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) #define AIF_SNIFF_TIMEOUT	(500*HZ)
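/*
 * Editorial note: 500*HZ is 500 seconds worth of jiffies; the time_before()
 * checks below treat a config_waiting_on entry older than this as stale.
 */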
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055)  *	aac_handle_aif		-	Handle a message from the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)  *	@dev: Which adapter this fib is from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057)  *	@fibptr: Pointer to fibptr from adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059)  *	This routine handles a driver notify fib from the adapter and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060)  *	dispatches it to the appropriate routine for handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	struct hw_fib * hw_fib = fibptr->hw_fib_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	u32 channel, id, lun, container;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	struct scsi_device *device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	enum {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		NOTHING,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		DELETE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		ADD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		CHANGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	} device_config_needed = NOTHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	/* Sniff for container changes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	if (!dev || !dev->fsa_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	container = channel = id = lun = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	 *	We have set this up to try to minimize the number of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	 * re-configures that take place. As a result, when certain
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	 * AIFs come in we will set a flag waiting for another
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	 * type of AIF before setting the re-config flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	switch (le32_to_cpu(aifcmd->command)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	case AifCmdDriverNotify:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 		case AifRawDeviceRemove:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			if ((container >> 28)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 				container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			channel = (container >> 24) & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 			if (channel >= dev->maximum_num_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 				container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 			id = container & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			if (id >= dev->maximum_num_physicals) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			lun = (container >> 16) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			channel = aac_phys_to_logical(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 			device_config_needed = DELETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 			break;
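			/*
			 * Editorial illustration (not original driver code):
			 * the word decoded above packs channel in bits 24-27,
			 * lun in bits 16-23 and id in bits 0-15, so e.g.
			 * 0x012A0005 selects channel 1, lun 0x2A, id 5 before
			 * channel is remapped by aac_phys_to_logical().
			 */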
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		 *	Morph or Expand complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		case AifDenMorphComplete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		case AifDenVolumeExtendComplete:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			if (container >= dev->maximum_num_containers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			 *	Find the scsi_device associated with the SCSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 			 * address. Make sure we have the right array, and if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			 * so set the flag to initiate a new re-config once we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 			 * see an AifEnConfigChange AIF come through.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 				device = scsi_device_lookup(dev->scsi_host_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 					CONTAINER_TO_CHANNEL(container),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 					CONTAINER_TO_ID(container),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 					CONTAINER_TO_LUN(container));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				if (device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 					dev->fsa_dev[container].config_needed = CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 					dev->fsa_dev[container].config_waiting_stamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 					scsi_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		 *	If we are waiting on something and this happens to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		 * that thing then set the re-configure flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		if (container != (u32)-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			if (container >= dev->maximum_num_containers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			if ((dev->fsa_dev[container].config_waiting_on ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 				dev->fsa_dev[container].config_waiting_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		} else for (container = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		    container < dev->maximum_num_containers; ++container) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			if ((dev->fsa_dev[container].config_waiting_on ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 				dev->fsa_dev[container].config_waiting_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	case AifCmdEventNotify:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		case AifEnBatteryEvent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			dev->cache_protected =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 		 *	Add an Array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		case AifEnAddContainer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			if (container >= dev->maximum_num_containers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			dev->fsa_dev[container].config_needed = ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 			dev->fsa_dev[container].config_waiting_on =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 				AifEnConfigChange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 		 *	Delete an Array.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 		case AifEnDeleteContainer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			if (container >= dev->maximum_num_containers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 			dev->fsa_dev[container].config_needed = DELETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 			dev->fsa_dev[container].config_waiting_on =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 				AifEnConfigChange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		 *	Container change detected. If we currently are not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		 * waiting on something else, set up to wait on a Config Change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		case AifEnContainerChange:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 			if (container >= dev->maximum_num_containers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			if (dev->fsa_dev[container].config_waiting_on &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 			dev->fsa_dev[container].config_needed = CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 			dev->fsa_dev[container].config_waiting_on =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 				AifEnConfigChange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 			dev->fsa_dev[container].config_waiting_stamp = jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		case AifEnConfigChange:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		case AifEnAddJBOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		case AifEnDeleteJBOD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 			if ((container >> 28)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 				container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 			channel = (container >> 24) & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 			if (channel >= dev->maximum_num_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 				container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 			id = container & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			if (id >= dev->maximum_num_physicals) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 				container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 			lun = (container >> 16) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 			channel = aac_phys_to_logical(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 			device_config_needed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 			  (((__le32 *)aifcmd->data)[0] ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 			if (device_config_needed == ADD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 				device = scsi_device_lookup(dev->scsi_host_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 					channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 					id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 					lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 				if (device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 					scsi_remove_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 					scsi_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		case AifEnEnclosureManagement:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 			 * If in JBOD mode, automatic exposure of a new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			 * physical target is suppressed until it is configured.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			if (dev->jbod)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			case EM_DRIVE_INSERTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			case EM_DRIVE_REMOVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 			case EM_SES_DRIVE_INSERTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			case EM_SES_DRIVE_REMOVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 				container = le32_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 					((__le32 *)aifcmd->data)[2]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 				if ((container >> 28)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 					container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 				channel = (container >> 24) & 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 				if (channel >= dev->maximum_num_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 					container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 				id = container & 0xFFFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 				lun = (container >> 16) & 0xFF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 				container = (u32)-1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 				if (id >= dev->maximum_num_physicals) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 					/* legacy dev_t ? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 					if ((0x2000 <= id) || lun || channel ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 					  ((channel = (id >> 7) & 0x3F) >=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 					  dev->maximum_num_channels))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 					lun = (id >> 4) & 7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 					id &= 0xF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 				channel = aac_phys_to_logical(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 				device_config_needed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 				  ((((__le32 *)aifcmd->data)[3]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 				    (((__le32 *)aifcmd->data)[3]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 				    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 				  ADD : DELETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 		case AifBuManagerEvent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 			aac_handle_aif_bu(dev, aifcmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		 *	If we are waiting on something and this happens to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		 * that thing then set the re-configure flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		if (container != (u32)-1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			if (container >= dev->maximum_num_containers)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 			if ((dev->fsa_dev[container].config_waiting_on ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				dev->fsa_dev[container].config_waiting_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		} else for (container = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		    container < dev->maximum_num_containers; ++container) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			if ((dev->fsa_dev[container].config_waiting_on ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 				dev->fsa_dev[container].config_waiting_on = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	case AifCmdJobProgress:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		 *	These are job progress AIFs. When a Clear is being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		 * done on a container it is initially created and then hidden
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		 * from the OS. When the clear completes we don't get a config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		 * change, so we monitor for job status complete on a clear and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		 * then wait for a container change.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 			for (container = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 			    container < dev->maximum_num_containers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 			    ++container) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 				 * Stomp on all config sequencing for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 				 * containers?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 				dev->fsa_dev[container].config_waiting_on =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 					AifEnContainerChange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 				dev->fsa_dev[container].config_needed = ADD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 				dev->fsa_dev[container].config_waiting_stamp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 					jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		    ((__le32 *)aifcmd->data)[6] == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 			for (container = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			    container < dev->maximum_num_containers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			    ++container) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 				 * Stomp on all config sequencing for all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 				 * containers?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 				dev->fsa_dev[container].config_waiting_on =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 					AifEnContainerChange;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 				dev->fsa_dev[container].config_needed = DELETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 				dev->fsa_dev[container].config_waiting_stamp =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 					jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
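	/*
	 * Editorial note (summary of the loop below): when no explicit device
	 * change was decoded above, walk the containers looking for one whose
	 * deferred config_needed has become actionable; for the container
	 * channel the walk resumes via the retry_next label after each device
	 * is handled.
	 */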
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	container = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) retry_next:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	if (device_config_needed == NOTHING) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		for (; container < dev->maximum_num_containers; ++container) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			if ((dev->fsa_dev[container].config_waiting_on == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 			    (dev->fsa_dev[container].config_needed != NOTHING) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 				device_config_needed =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 					dev->fsa_dev[container].config_needed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 				dev->fsa_dev[container].config_needed = NOTHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 				channel = CONTAINER_TO_CHANNEL(container);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 				id = CONTAINER_TO_ID(container);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 				lun = CONTAINER_TO_LUN(container);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	if (device_config_needed == NOTHING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	 *	If we decided that a re-configuration needs to be done,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	 * schedule it here on the way out the door, please close the door
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	 * behind you.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	 *	Find the scsi_device associated with the SCSI address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	 * and mark it as changed, invalidating the cache. This deals
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	 * with changes to existing device IDs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	if (!dev || !dev->scsi_host_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	 * force reload of disk info via aac_probe_container
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	if ((channel == CONTAINER_CHANNEL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	  (device_config_needed != NOTHING)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		if (dev->fsa_dev[container].valid == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			dev->fsa_dev[container].valid = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		aac_probe_container(dev, container);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	if (device) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		switch (device_config_needed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 		case DELETE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 			scsi_remove_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 			if (scsi_device_online(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 				scsi_device_set_state(device, SDEV_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 				sdev_printk(KERN_INFO, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 					"Device offlined - %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 					(channel == CONTAINER_CHANNEL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 						"array deleted" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 						"enclosure services event");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		case ADD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			if (!scsi_device_online(device)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 				sdev_printk(KERN_INFO, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 					"Device online - %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 					(channel == CONTAINER_CHANNEL) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 						"array created" :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 						"enclosure services event");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 				scsi_device_set_state(device, SDEV_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 			fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		case CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 			if ((channel == CONTAINER_CHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			 && (!dev->fsa_dev[container].valid)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) #if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 				scsi_remove_device(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 				if (!scsi_device_online(device))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 				scsi_device_set_state(device, SDEV_OFFLINE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 				sdev_printk(KERN_INFO, device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 					"Device offlined - %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 					"array failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			scsi_rescan_device(&device->sdev_gendev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		scsi_device_put(device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		device_config_needed = NOTHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	if (device_config_needed == ADD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	if (channel == CONTAINER_CHANNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		container++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		device_config_needed = NOTHING;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		goto retry_next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) static void aac_schedule_bus_scan(struct aac_dev *aac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	if (aac->sa_firmware)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		aac_schedule_safw_scan_worker(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		aac_schedule_src_reinit_aif_worker(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	int index, quirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	int retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	struct Scsi_Host *host = aac->scsi_host_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	int jafo = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	int bled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	u64 dmamask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	int num_of_fibs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	 * Assumptions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	 *	- host is locked, unless called by the aacraid thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	 *	  (a matter of convenience, due to legacy issues surrounding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	 *	  eh_host_adapter_reset).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	 *	- in_reset is asserted, so no new i/o is getting to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	 *	  card.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	 *	- The card is dead, or will be very shortly ;-/ so no new
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	 *	  commands are completing in the interrupt service.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	aac_adapter_disable_int(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (aac->thread && aac->thread->pid != current->pid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		spin_unlock_irq(host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		kthread_stop(aac->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		aac->thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		jafo = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	 *	A positive health value means the adapter is in a known
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	 * DEAD PANIC state and could be reset to `try again'.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	bled = forced ? 0 : aac_adapter_check_health(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	retval = aac_adapter_restart(aac, bled, reset_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	 *	Loop through the FIBs, completing the synchronous ones so waiters can return
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	retval = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	for (index = 0; index <  num_of_fibs; index++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		struct fib *fib = &aac->fibs[index];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		__le32 XferState = fib->hw_fib_va->header.XferState;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		bool is_response_expected = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		   (XferState & cpu_to_le32(ResponseExpected)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 			is_response_expected = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 		if (is_response_expected
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		  || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 			unsigned long flagv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 			spin_lock_irqsave(&fib->event_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 			complete(&fib->event_wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 			spin_unlock_irqrestore(&fib->event_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 			schedule();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 			retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	/* Give some extra time for ioctls to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (retval == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		ssleep(2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	index = aac->cardtype;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	 * Re-initialize the adapter: first free resources, then carefully
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	 * apply the initialization sequence to come back up again. The only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	 * risk is the firmware dropping its cache; it is assumed the caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	 * will ensure that I/O is quiesced and the card is flushed in that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 * case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	aac_free_irq(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	aac_fib_map_free(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			  aac->comm_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	aac_adapter_ioremap(aac, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	aac->comm_addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	aac->comm_phys = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	kfree(aac->queues);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	aac->queues = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	kfree(aac->fsa_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	aac->fsa_dev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
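	/*
	 * Re-apply the DMA masks for this card type: 31-bit quirk parts get a
	 * 32-bit streaming mask (narrowed to a 31-bit coherent mask below),
	 * other non-SRC parts take the full 32-bit mask, and SRC-family parts
	 * only set the coherent mask.
	 */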
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	dmamask = DMA_BIT_MASK(32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	quirks = aac_get_driver_ident(index)->quirks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	if (quirks & AAC_QUIRK_31BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		retval = dma_set_mask(&aac->pdev->dev, dmamask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 	else if (!(quirks & AAC_QUIRK_SRC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		retval = dma_set_mask(&aac->pdev->dev, dmamask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 		retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	if (quirks & AAC_QUIRK_31BIT && !retval) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		dmamask = DMA_BIT_MASK(31);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	if (retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	if (jafo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		aac->thread = kthread_run(aac_command_thread, aac, "%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 					  aac->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		if (IS_ERR(aac->thread)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 			retval = PTR_ERR(aac->thread);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 			aac->thread = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	(void)aac_get_adapter_info(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		host->sg_tablesize = 34;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 		host->max_sectors = (host->sg_tablesize * 8) + 112;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		host->sg_tablesize = 17;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 		host->max_sectors = (host->sg_tablesize * 8) + 112;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	aac_get_config_status(aac, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 	aac_get_containers(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	 * This is where the assumption that the Adapter is quiesced
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	 * is important.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	scsi_host_complete_all_commands(host, DID_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	retval = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	aac->in_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	 * Issue a bus rescan to catch any configuration change that might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	 * have occurred.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	if (!retval && !is_kdump_kernel()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 		aac_schedule_bus_scan(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	if (jafo) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		spin_lock_irq(host->host_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
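/**
 *	aac_reset_adapter	-	reset the adapter, serialized against other resets
 *	@aac: adapter to reset
 *	@forced: nonzero skips the health check; values >= 2 also skip the
 *		 cache flush/shutdown
 *	@reset_type: passed through to the adapter restart handler
 *
 *	Guards against concurrent resets via in_reset, blocks the SCSI host,
 *	optionally sends a shutdown to flush the firmware cache, and then
 *	calls _aac_reset_adapter() with the host lock held.
 */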
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	unsigned long flagv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	int retval, unblock_retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	struct Scsi_Host *host = aac->scsi_host_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	int bled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	if (aac->in_reset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 		spin_unlock_irqrestore(&aac->fib_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	aac->in_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	spin_unlock_irqrestore(&aac->fib_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	 * Wait for all commands to complete to this specific
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	 * target (block maximum 60 seconds). Although not necessary,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 	 * it does make us a good storage citizen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	scsi_host_block(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	/* Quiesce build, flush cache, write through mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	if (forced < 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		aac_send_shutdown(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	spin_lock_irqsave(host->host_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	bled = forced ? forced :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 			(aac_check_reset != 0 && aac_check_reset != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	retval = _aac_reset_adapter(aac, bled, reset_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 	spin_unlock_irqrestore(host->host_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 	unblock_retval = scsi_host_unblock(host, SDEV_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	if (!retval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		retval = unblock_retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 	if ((forced < 2) && (retval == -ENODEV)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		struct fib * fibctx = aac_fib_alloc(aac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		if (fibctx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 			struct aac_pause *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			int status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 			aac_fib_init(fibctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			cmd = (struct aac_pause *) fib_data(fibctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 			cmd->command = cpu_to_le32(VM_ContainerConfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			cmd->type = cpu_to_le32(CT_PAUSE_IO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			cmd->timeout = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			cmd->min = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			cmd->noRescan = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			cmd->count = cpu_to_le32(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			status = aac_fib_send(ContainerCommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			  fibctx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			  sizeof(struct aac_pause),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			  FsaNormal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			  -2 /* Timeout silently */, 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 			  NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			if (status >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 				aac_fib_complete(fibctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 			/* FIB should be freed only after getting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			 * the response from the F/W */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			if (status != -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 				aac_fib_free(fibctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	return retval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
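/**
 *	aac_check_health	-	check adapter health and report firmware panics
 *	@aac: adapter to check
 *
 *	Returns 0 when the adapter is healthy or a reset is already in progress.
 *	On a firmware panic (BlinkLED) a faked AifExeFirmwarePanic AIF is queued
 *	to every registered fib context so that user-space listeners wake up,
 *	and the BlinkLED code is returned.
 */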
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) int aac_check_health(struct aac_dev * aac)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	int BlinkLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	unsigned long time_now, flagv = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	struct list_head * entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		spin_unlock_irqrestore(&aac->fib_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		return 0; /* OK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	aac->in_reset = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	/* Fake up an AIF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	 *	aac_aifcmd.command = AifCmdEventNotify = 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	 *	aac_aifcmd.data[2] = AifHighPriority = 3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	 *	aac_aifcmd.data[3] = BlinkLED
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	time_now = jiffies/HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	entry = aac->fib_list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	 * For each Context that is on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	 * fibctxList, make a copy of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	 * fib, and then set the event to wake up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	 * thread that is waiting for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	while (entry != &aac->fib_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		 * Extract the fibctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 		struct hw_fib * hw_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		struct fib * fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		 * Check if the queue is getting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 		 * backlogged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 		if (fibctx->count > 20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 			 * It's *not* jiffies folks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 			 * but jiffies / HZ, so do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 			 * panic ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 			u32 time_last = fibctx->jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 			 * Has it been > 2 minutes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			 * since the last read off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 			 * the queue?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			if ((time_now - time_last) > aif_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 				entry = entry->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 				aac_close_fib_context(aac, fibctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		 * Warning: no sleep allowed while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		 * holding spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 		if (fib && hw_fib) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			struct aac_aifcmd * aif;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 			fib->hw_fib_va = hw_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 			fib->dev = aac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 			aac_fib_init(fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			fib->type = FSAFS_NTC_FIB_CONTEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			fib->size = sizeof (struct fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 			fib->data = hw_fib->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 			aif = (struct aac_aifcmd *)hw_fib->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 			aif->command = cpu_to_le32(AifCmdEventNotify);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			 * Put the FIB onto the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 			 * fibctx's fibs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 			list_add_tail(&fib->fiblink, &fibctx->fib_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 			fibctx->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 			 * Set the event to wake up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			 * thread that is waiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			complete(&fibctx->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 			kfree(fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 			kfree(hw_fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		entry = entry->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	spin_unlock_irqrestore(&aac->fib_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	if (BlinkLED < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 				aac->name, BlinkLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	aac->in_reset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	return BlinkLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 
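/*
 * A safw RAID volume is addressed on the container channel and must fall
 * within the adapter's container limit.
 */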
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 	return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 								int bus,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 								int target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	if (bus != CONTAINER_CHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		bus = aac_phys_to_logical(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	if (bus != CONTAINER_CHANNEL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		bus = aac_phys_to_logical(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) static void aac_put_safw_scsi_device(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	if (sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		scsi_device_put(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	scsi_remove_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	aac_put_safw_scsi_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	int bus, int target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 	if (is_safw_raid_volume(dev, bus, target))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		return dev->fsa_dev[target].valid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 		return aac_is_safw_scan_count_equal(dev, bus, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	int is_exposed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	struct scsi_device *sdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 	if (sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		is_exposed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	aac_put_safw_scsi_device(sdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	return is_exposed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
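/*
 * Reconcile the SCSI midlayer view with the firmware view: walk every
 * (bus, target) slot, expose targets that have become valid and remove
 * targets that have disappeared.
 */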
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) static int aac_update_safw_host_devices(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 	int bus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	int target;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	int is_exposed = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	int rcode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	rcode = aac_setup_safw_adapter(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	if (unlikely(rcode < 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		bus = get_bus_number(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		target = get_target_number(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 		is_exposed = aac_is_safw_device_exposed(dev, bus, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 			aac_add_safw_device(dev, bus, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 		else if (!aac_is_safw_target_valid(dev, bus, target) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 								is_exposed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 			aac_remove_safw_device(dev, bus, target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	return rcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) static int aac_scan_safw_host(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	int rcode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	rcode = aac_update_safw_host_devices(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	if (rcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		aac_schedule_safw_scan_worker(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	return rcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
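/*
 * Rescan the adapter under scan_mutex: safw adapters use the targeted update
 * above, all other adapters fall back to a full scsi_scan_host().
 */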
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) int aac_scan_host(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	int rcode = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	mutex_lock(&dev->scan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (dev->sa_firmware)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		rcode = aac_scan_safw_host(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		scsi_scan_host(dev->scsi_host_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	mutex_unlock(&dev->scan_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	return rcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 
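/*
 * Deferred-work handler: wait for the SCSI host to leave error recovery,
 * then call aac_reinit_aif() for the current card type.
 */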
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) void aac_src_reinit_aif_worker(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 	struct aac_dev *dev = container_of(to_delayed_work(work),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 				struct aac_dev, src_reinit_aif_worker);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	wait_event(dev->scsi_host_ptr->host_wait,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 			!scsi_host_in_recovery(dev->scsi_host_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	aac_reinit_aif(dev, dev->cardtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952)  *	aac_handle_sa_aif	-	Handle a message from the firmware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953)  *	@dev: Which adapter this fib is from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)  *	@fibptr: Pointer to fibptr from adapter
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956)  *	This routine handles a driver notify fib from the adapter and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957)  *	dispatches it to the appropriate routine for handling.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 	u32 events = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		events = SA_AIF_HOTPLUG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		events = SA_AIF_HARDWARE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		events = SA_AIF_PDEV_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		events = SA_AIF_LDEV_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		events = SA_AIF_BPSTAT_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 	else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		events = SA_AIF_BPCFG_CHANGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	switch (events) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	case SA_AIF_HOTPLUG:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	case SA_AIF_HARDWARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	case SA_AIF_PDEV_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	case SA_AIF_LDEV_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	case SA_AIF_BPCFG_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 		aac_scan_host(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	case SA_AIF_BPSTAT_CHANGE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 		/* currently do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
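	/*
	 * Poll MUnit.IDR up to 10 times, sleeping one second whenever the
	 * AIF bit (1 << 23) has not yet been cleared by the firmware.
	 */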
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	for (i = 1; i <= 10; ++i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		events = src_readl(dev, MUnit.IDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		if (events & (1<<23)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 			pr_warn("AIF not cleared by firmware - %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 				i, 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 			ssleep(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 
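/*
 * Estimate how many spare FIBs to pre-allocate for AIF fan-out: one per slot
 * in the adapter's AIF area plus one per registered fib context.
 */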
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) static int get_fib_count(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	unsigned int num = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	struct list_head *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	unsigned long flagv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	 * Warning: no sleep allowed while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	 * holding spinlock. We take the estimate
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 	 * and pre-allocate a set of fibs outside the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	 * lock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 			/ sizeof(struct hw_fib); /* some extra */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	spin_lock_irqsave(&dev->fib_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	entry = dev->fib_list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	while (entry != &dev->fib_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		entry = entry->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 		++num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	spin_unlock_irqrestore(&dev->fib_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	return num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 
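/*
 * Allocate up to @num hw_fib/fib pairs with GFP_KERNEL (outside any spinlock)
 * and return the number of pairs actually obtained.
 */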
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 						struct fib **fib_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 						unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	struct hw_fib **hw_fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	struct fib **fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	hw_fib_p = hw_fib_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	fib_p = fib_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	while (hw_fib_p < &hw_fib_pool[num]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		if (!(*(hw_fib_p++))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 			--hw_fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		if (!(*(fib_p++))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 			kfree(*(--hw_fib_p));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	 * Get the actual number of allocated fibs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	num = hw_fib_p - hw_fib_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 	return num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
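/*
 * Hand a copy of the incoming AIF to every registered fib context using the
 * pre-allocated pools and complete each context's waiter; backlogged contexts
 * that have not been read within aif_timeout are closed instead. Finally the
 * original FIB is acknowledged back to the adapter with ST_OK.
 */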
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) static void wakeup_fibctx_threads(struct aac_dev *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 						struct hw_fib **hw_fib_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 						struct fib **fib_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 						struct fib *fib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 						struct hw_fib *hw_fib,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 						unsigned int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	unsigned long flagv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 	struct list_head *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	struct hw_fib **hw_fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	struct fib **fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 	u32 time_now, time_last;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	struct hw_fib *hw_newfib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	struct fib *newfib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 	struct aac_fib_context *fibctx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	time_now = jiffies/HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 	spin_lock_irqsave(&dev->fib_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	entry = dev->fib_list.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	 * For each Context that is on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	 * fibctxList, make a copy of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	 * fib, and then set the event to wake up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	 * thread that is waiting for it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	hw_fib_p = hw_fib_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	fib_p = fib_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	while (entry != &dev->fib_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		 * Extract the fibctx
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 		fibctx = list_entry(entry, struct aac_fib_context,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 				next);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 		 * Check if the queue is getting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 		 * backlogged
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 		if (fibctx->count > 20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 			 * It's *not* jiffies folks,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 			 * but jiffies / HZ so do not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 			 * panic ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 			time_last = fibctx->jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 			 * Has it been > 2 minutes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 			 * since the last read off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 			 * the queue?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 			if ((time_now - time_last) > aif_timeout) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 				entry = entry->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 				aac_close_fib_context(dev, fibctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		 * Warning: no sleep allowed while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		 * holding spinlock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 		if (hw_fib_p >= &hw_fib_pool[num]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 			pr_warn("aifd: didn't allocate NewFib\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 			entry = entry->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		hw_newfib = *hw_fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 		*(hw_fib_p++) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 		newfib = *fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		*(fib_p++) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		 * Make the copy of the FIB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		memcpy(newfib, fib, sizeof(struct fib));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 		newfib->hw_fib_va = hw_newfib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 		 * Put the FIB onto the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 		 * fibctx's fibs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		list_add_tail(&newfib->fiblink, &fibctx->fib_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 		fibctx->count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		 * Set the event to wake up the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		 * thread that is waiting.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		complete(&fibctx->completion);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		entry = entry->next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	 *	Set the status of this FIB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	aac_fib_adapter_complete(fib, sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	spin_unlock_irqrestore(&dev->fib_lock, flagv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 
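/*
 * Drain the HostNormCmdQueue: for each queued AIF either hand it to the Thor
 * handler (sa_firmware) or process it locally and fan copies out to the
 * user-space fib contexts via wakeup_fibctx_threads().
 */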
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) static void aac_process_events(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 	struct hw_fib *hw_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	struct fib *fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 	spinlock_t *t_lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	spin_lock_irqsave(t_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 		struct list_head *entry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 		struct aac_aifcmd *aifcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 		unsigned int  num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		struct hw_fib **hw_fib_pool, **hw_fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 		struct fib **fib_pool, **fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 		set_current_state(TASK_RUNNING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 		list_del(entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		spin_unlock_irqrestore(t_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		fib = list_entry(entry, struct fib, fiblink);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		hw_fib = fib->hw_fib_va;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		if (dev->sa_firmware) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 			/* Thor AIF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 			aac_handle_sa_aif(dev, fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 			goto free_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) 		 *	We will process the FIB here or pass it to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 		 *	worker thread that is TBD. We really can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) 		 *	do anything at this point since we don't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 		 *	anything defined for this thread to do.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 		memset(fib, 0, sizeof(struct fib));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 		fib->type = FSAFS_NTC_FIB_CONTEXT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 		fib->size = sizeof(struct fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 		fib->hw_fib_va = hw_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		fib->data = hw_fib->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 		fib->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		 *	We only handle AifRequest fibs from the adapter.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 		aifcmd = (struct aac_aifcmd *) hw_fib->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 			/* Handle Driver Notify Events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 			aac_handle_aif(dev, fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 			goto free_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 		 * The u32 here is important and intended. We are using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		 * 32-bit wrapping time to fit the adapter field.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 		/* Sniff events */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 		 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 			aac_handle_aif(dev, fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 		 * get number of fibs to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 		num = get_fib_count(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 		if (!num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 			goto free_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 						GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 		if (!hw_fib_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 			goto free_fib;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		if (!fib_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 			goto free_hw_fib_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		 * Fill up fib pointer pools with actual fibs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 		 * and hw_fibs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 		if (!num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 			goto free_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 		 * Wake up the threads that are waiting for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		 * response from the firmware (ioctl).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 							    fib, hw_fib, num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) free_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		/* Free up the remaining resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 		hw_fib_p = hw_fib_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		fib_p = fib_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 		while (hw_fib_p < &hw_fib_pool[num]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 			kfree(*hw_fib_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 			kfree(*fib_p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 			++fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 			++hw_fib_p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		kfree(fib_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) free_hw_fib_pool:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 		kfree(hw_fib_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) free_fib:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 		kfree(fib);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 		spin_lock_irqsave(t_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 	 *	There are no more AIFs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 	spin_unlock_irqrestore(t_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
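/*
 * Send a BMIC WRITE_HOST_WELLNESS SRB carrying @wellness_str to the adapter's
 * virtual device (virt_device_bus/target) through a single 64-bit SG element.
 */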
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 							u32 datasize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	struct aac_srb *srbcmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) 	struct sgmap64 *sg64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 	dma_addr_t addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) 	char *dma_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 	struct fib *fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	u32 vbus, vid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	fibptr = aac_fib_alloc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	if (!fibptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 				     GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	if (!dma_buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		goto fib_free_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 	aac_fib_init(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	srbcmd = (struct aac_srb *)fib_data(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	srbcmd->channel = cpu_to_le32(vbus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 	srbcmd->id = cpu_to_le32(vid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 	srbcmd->lun = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	srbcmd->flags = cpu_to_le32(SRB_DataOut);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	srbcmd->timeout = cpu_to_le32(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 	srbcmd->retry_limit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 	srbcmd->cdb_size = cpu_to_le32(12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	srbcmd->count = cpu_to_le32(datasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) 	srbcmd->cdb[0] = BMIC_OUT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	memcpy(dma_buf, (char *)wellness_str, datasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 
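	/*
	 * Single s/g element: split the 64-bit DMA address into its high
	 * (addr[1]) and low (addr[0]) 32-bit halves.
	 */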
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) 	sg64 = (struct sgmap64 *)&srbcmd->sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) 	sg64->count = cpu_to_le32(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) 	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) 	sg64->sg[0].count = cpu_to_le32(datasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) 	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 				FsaNormal, 1, 1, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) 	 * Do not set XferState to zero unless we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) 	 * receive a response from the F/W
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) 	if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 		aac_fib_complete(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	 * FIB should be freed only after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	 * getting the response from the F/W
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	if (ret != -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 		goto fib_free_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) fib_free_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	aac_fib_free(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 
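/**
 *	aac_send_safw_hostttime	-	send the host time to an sa_firmware adapter
 *	@dev: Adapter to update
 *	@now: current wall-clock time
 *
 *	Converts @now to local time and patches it, BCD encoded, into the
 *	"<HW>TD...ZZ" wellness template, then hands the buffer to
 *	aac_send_wellness_command().  Returns -ENODEV without sending on
 *	non sa_firmware adapters.
 */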
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) static int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 	struct tm cur_tm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 	u32 datasize = sizeof(wellness_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 	time64_t local_time;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 	int ret = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 	if (!dev->sa_firmware)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 	time64_to_tm(local_time, 0, &cur_tm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	cur_tm.tm_mon += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 	cur_tm.tm_year += 1900;
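	/*
	 * BCD-encode the local time into the template: hour/min/sec at
	 * offsets 8-10, month/day/century/year at offsets 12-15.
	 */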
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	wellness_str[9] = bin2bcd(cur_tm.tm_min);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 	ret = aac_send_wellness_command(dev, wellness_str, datasize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 
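/**
 *	aac_send_hosttime	-	send the host time to the adapter
 *	@dev: Adapter to update
 *	@now: current wall-clock time
 *
 *	Sends the current time as a single 32-bit seconds value in a
 *	SendHostTime FIB.
 */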
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) static int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	struct fib *fibptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 	__le32 *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 	fibptr = aac_fib_alloc(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 	if (!fibptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	aac_fib_init(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	info = (__le32 *)fib_data(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	*info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 					1, 1, NULL, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	 * Do not set XferState to zero unless we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	 * receive a response from the F/W
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 	if (ret >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		aac_fib_complete(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 	 * FIB should be freed only after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	 * getting the response from the F/W
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	if (ret != -ERESTARTSYS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		aac_fib_free(fibptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419)  *	aac_command_thread	-	command processing thread
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420)  *	@data: Adapter to monitor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422)  *	Waits on the commandready event in its queue. When the event gets set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423)  *	it will pull FIBs off its queue. It will continue to pull FIBs off
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424)  *	until the queue is empty. When the queue is empty it will wait for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425)  *	more FIBs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) int aac_command_thread(void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	struct aac_dev *dev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 	DECLARE_WAITQUEUE(wait, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	unsigned long next_jiffies = jiffies + HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 	unsigned long next_check_jiffies = next_jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	long difference = HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 	 *	We can only have one thread per adapter for AIFs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 	if (dev->aif_thread)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 	 *	Let the DPC know it has a place to send the AIFs to.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 	dev->aif_thread = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 	set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 	dprintk ((KERN_INFO "aac_command_thread start\n"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 		aac_process_events(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		 *	Background activity
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 		 */
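		/*
		 * Two periodic jobs are driven from here: an adapter health
		 * check every check_interval seconds and a host time update
		 * every update_interval seconds.
		 */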
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 		if ((time_before(next_check_jiffies,next_jiffies))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 			next_check_jiffies = next_jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 			if (aac_adapter_check_health(dev) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 				difference = ((long)(unsigned)check_interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 					   * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 				next_check_jiffies = jiffies + difference;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 			} else if (!dev->queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 		if (!time_before(next_check_jiffies,next_jiffies)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		 && ((difference = next_jiffies - jiffies) <= 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			struct timespec64 now;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 			int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 			/* Don't even try to talk to the adapter if it's sick */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 			ret = aac_adapter_check_health(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 			if (ret || !dev->queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 			next_check_jiffies = jiffies
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 					   + ((long)(unsigned)check_interval)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 					   * HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 			ktime_get_real_ts64(&now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 			/* Synchronize our watches */
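			/*
			 * Only send the time when this wakeup lands within a
			 * jiffy of a second boundary (tv_sec is then rounded
			 * to the nearest second); otherwise just recompute
			 * the sleep and retry on a later wakeup.
			 */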
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 			 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 				difference = HZ + HZ / 2 -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 					     now.tv_nsec / (NSEC_PER_SEC / HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 			else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 				if (now.tv_nsec > NSEC_PER_SEC / 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 					++now.tv_sec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 				if (dev->sa_firmware)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 					ret =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 					aac_send_safw_hostttime(dev, &now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 					ret = aac_send_hosttime(dev, &now);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 				difference = (long)(unsigned)update_interval*HZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 			next_jiffies = jiffies + difference;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 			if (time_before(next_check_jiffies,next_jiffies))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 				difference = next_check_jiffies - jiffies;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 		if (difference <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 			difference = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 		set_current_state(TASK_INTERRUPTIBLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 		if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		 * we probably want usleep_range() here instead of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 		 * jiffies computation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 		schedule_timeout(difference);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 		if (kthread_should_stop())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	if (dev->queues)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	dev->aif_thread = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 
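/**
 *	aac_acquire_irq		-	register the adapter's interrupt handlers
 *	@dev: Adapter to acquire interrupts for
 *
 *	Requests one IRQ per MSI-X vector when MSI-X is in use, otherwise a
 *	single shared IRQ on the PCI device.  Returns 0 on success or -1 if
 *	a request_irq() call fails.
 */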
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) int aac_acquire_irq(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 	int j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 	int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		for (i = 0; i < dev->max_msix; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 			dev->aac_msix[i].vector_no = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 			dev->aac_msix[i].dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 			if (request_irq(pci_irq_vector(dev->pdev, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 					dev->a_ops.adapter_intr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 					0, "aacraid", &(dev->aac_msix[i]))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 						dev->name, dev->id, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 				for (j = 0 ; j < i ; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 					free_irq(pci_irq_vector(dev->pdev, j),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 						 &(dev->aac_msix[j]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 				pci_disable_msix(dev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 				ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 		dev->aac_msix[0].vector_no = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 		dev->aac_msix[0].dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 			IRQF_SHARED, "aacraid",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 			&(dev->aac_msix[0])) < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 			if (dev->msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 				pci_disable_msi(dev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 					dev->name, dev->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 			ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 
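/**
 *	aac_free_irq		-	release the adapter's interrupt handlers
 *	@dev: Adapter to release interrupts for
 *
 *	Frees the adapter's registered IRQ(s) and then disables MSI or MSI-X
 *	on the PCI device.
 */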
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) void aac_free_irq(struct aac_dev *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	if (aac_is_src(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 		if (dev->max_msix > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 			for (i = 0; i < dev->max_msix; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 				free_irq(pci_irq_vector(dev->pdev, i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 					 &(dev->aac_msix[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 		free_irq(dev->pdev->irq, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 	if (dev->msi)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 		pci_disable_msi(dev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 	else if (dev->max_msix > 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 		pci_disable_msix(dev->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) }