Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

drivers/scsi/esas2r/esas2r_ioctl.c (commit 8f3ce5b39, kx, 2023-10-28)
/*
 *  linux/drivers/scsi/esas2r/esas2r_ioctl.c
 *      For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
 *
 *  Copyright (c) 2001-2013 ATTO Technology, Inc.
 *  (mailto:linuxdrivers@attotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */

#include "esas2r.h"

/*
 * Buffered ioctl handlers.  A buffered ioctl is one which requires that we
 * allocate a DMA-able memory area to communicate with the firmware.  In
 * order to prevent continually allocating and freeing consistent memory,
 * we will allocate a global buffer the first time we need it and re-use
 * it for subsequent ioctl calls that require it.
 */

u8 *esas2r_buffered_ioctl;
dma_addr_t esas2r_buffered_ioctl_addr;
u32 esas2r_buffered_ioctl_size;
struct pci_dev *esas2r_buffered_ioctl_pcid;

static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
				       struct esas2r_request *,
				       struct esas2r_sg_context *,
				       void *);
typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
					     struct esas2r_request *, void *);

struct esas2r_buffered_ioctl {
	struct esas2r_adapter *a;
	void *ioctl;
	u32 length;
	u32 control_code;
	u32 offset;
	BUFFERED_IOCTL_CALLBACK callback;
	void *context;
	BUFFERED_IOCTL_DONE_CALLBACK done_callback;
	void *done_context;
};

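/* Completion callback for FM API requests: mark done and wake the waiter. */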
static void complete_fm_api_req(struct esas2r_adapter *a,
				struct esas2r_request *rq)
{
	a->fm_api_command_done = 1;
	wake_up_interruptible(&a->fm_api_waiter);
}

/* Callbacks for building scatter/gather lists for FM API requests */
static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.phys + offset;
	return a->firmware.orig_len - offset;
}

static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
{
	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
	int offset = sgc->cur_offset - a->save_offset;

	(*addr) = a->firmware.header_buff_phys + offset;
	return sizeof(struct esas2r_flash_img) - offset;
}

/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
	struct esas2r_request *rq;

	if (mutex_lock_interruptible(&a->fm_api_mutex)) {
		fi->status = FI_STAT_BUSY;
		return;
	}

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		fi->status = FI_STAT_BUSY;
		goto free_sem;
	}

	if (fi == &a->firmware.header) {
		a->firmware.header_buff =
			dma_alloc_coherent(&a->pcid->dev,
					   (size_t)sizeof(struct esas2r_flash_img),
					   (dma_addr_t *)&a->firmware.header_buff_phys,
					   GFP_KERNEL);

		if (a->firmware.header_buff == NULL) {
			esas2r_debug("failed to allocate header buffer!");
			fi->status = FI_STAT_BUSY;
			goto free_req;
		}

		memcpy(a->firmware.header_buff, fi,
		       sizeof(struct esas2r_flash_img));
		a->save_offset = a->firmware.header_buff;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api_header;
	} else {
		a->save_offset = (u8 *)fi;
		a->fm_api_sgc.get_phys_addr =
			(PGETPHYSADDR)get_physaddr_fm_api;
	}

	rq->comp_cb = complete_fm_api_req;
	a->fm_api_command_done = 0;
	a->fm_api_sgc.cur_offset = a->save_offset;

	if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
			   &a->fm_api_sgc))
		goto all_done;

	/* Now wait around for it to complete. */
	while (!a->fm_api_command_done)
		wait_event_interruptible(a->fm_api_waiter,
					 a->fm_api_command_done);
all_done:
	if (fi == &a->firmware.header) {
		memcpy(fi, a->firmware.header_buff,
		       sizeof(struct esas2r_flash_img));

		dma_free_coherent(&a->pcid->dev,
				  (size_t)sizeof(struct esas2r_flash_img),
				  a->firmware.header_buff,
				  (dma_addr_t)a->firmware.header_buff_phys);
	}
free_req:
	esas2r_free_request(a, (struct esas2r_request *)rq);
free_sem:
	mutex_unlock(&a->fm_api_mutex);
	return;
}

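/* Completion callback for NVRAM requests: mark done and wake the waiter. */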
static void complete_nvr_req(struct esas2r_adapter *a,
			     struct esas2r_request *rq)
{
	a->nvram_command_done = 1;
	wake_up_interruptible(&a->nvram_waiter);
}

/* Callback for building scatter/gather lists for buffered ioctls */
static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
				       u64 *addr)
{
	int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;

	(*addr) = esas2r_buffered_ioctl_addr + offset;
	return esas2r_buffered_ioctl_size - offset;
}

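/* Completion callback for buffered ioctls: mark done and wake the waiter. */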
static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
					struct esas2r_request *rq)
{
	a->buffered_ioctl_done = 1;
	wake_up_interruptible(&a->buffered_ioctl_waiter);
}

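/*
 * Run a buffered ioctl: serialize on the global semaphore, grow the shared
 * DMA buffer if it is too small, copy the caller's data in, issue the
 * request through the supplied callback and copy the results back out on
 * success.
 */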
static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
{
	struct esas2r_adapter *a = bi->a;
	struct esas2r_request *rq;
	struct esas2r_sg_context sgc;
	u8 result = IOCTL_SUCCESS;

	if (down_interruptible(&buffered_ioctl_semaphore))
		return IOCTL_OUT_OF_RESOURCES;

	/* allocate a buffer or use the existing buffer. */
	if (esas2r_buffered_ioctl) {
		if (esas2r_buffered_ioctl_size < bi->length) {
			/* free the too-small buffer and get a new one */
			dma_free_coherent(&a->pcid->dev,
					  (size_t)esas2r_buffered_ioctl_size,
					  esas2r_buffered_ioctl,
					  esas2r_buffered_ioctl_addr);

			goto allocate_buffer;
		}
	} else {
allocate_buffer:
		esas2r_buffered_ioctl_size = bi->length;
		esas2r_buffered_ioctl_pcid = a->pcid;
		esas2r_buffered_ioctl =
			dma_alloc_coherent(&a->pcid->dev,
					   (size_t)esas2r_buffered_ioctl_size,
					   &esas2r_buffered_ioctl_addr,
					   GFP_KERNEL);
	}

	if (!esas2r_buffered_ioctl) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate %d bytes of consistent memory "
			   "for a buffered ioctl!",
			   bi->length);

		esas2r_debug("buffered ioctl alloc failure");
		result = IOCTL_OUT_OF_RESOURCES;
		goto exit_cleanly;
	}

	memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);

	rq = esas2r_alloc_request(a);
	if (rq == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "could not allocate an internal request");

		result = IOCTL_OUT_OF_RESOURCES;
		esas2r_debug("buffered ioctl - no requests");
		goto exit_cleanly;
	}

	a->buffered_ioctl_done = 0;
	rq->comp_cb = complete_buffered_ioctl_req;
	sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
	sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
	sgc.length = esas2r_buffered_ioctl_size;

	if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
		/* completed immediately, no need to wait */
		a->buffered_ioctl_done = 0;
		goto free_andexit_cleanly;
	}

	/* now wait around for it to complete. */
	while (!a->buffered_ioctl_done)
		wait_event_interruptible(a->buffered_ioctl_waiter,
					 a->buffered_ioctl_done);

free_andexit_cleanly:
	if (result == IOCTL_SUCCESS && bi->done_callback)
		(*bi->done_callback)(a, rq, bi->done_context);

	esas2r_free_request(a, rq);

exit_cleanly:
	if (result == IOCTL_SUCCESS)
		memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);

	up(&buffered_ioctl_semaphore);
	return result;
}

/* SMP ioctl support */
static int smp_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc, void *context)
{
	struct atto_ioctl_smp *si =
		(struct atto_ioctl_smp *)esas2r_buffered_ioctl;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		si->status = ATTO_STS_OUT_OF_RSRC;
		return false;
	}

	esas2r_start_request(a, rq);
	return true;
}

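/* Handle an SMP ioctl by running it through the buffered ioctl path. */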
static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = si;
	bi.length = sizeof(struct atto_ioctl_smp)
		    + le32_to_cpu(si->req_length)
		    + le32_to_cpu(si->rsp_length);
	bi.offset = 0;
	bi.callback = smp_ioctl_callback;
	return handle_buffered_ioctl(&bi);
}

/* CSMI ioctl support */
static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
					     struct esas2r_request *rq)
{
	rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
	rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);

	/* Now call the original completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

/* Tunnel a CSMI IOCTL to the back end driver for processing. */
static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
			      union atto_ioctl_csmi *ci,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      u32 ctrl_code,
			      u16 target_id)
{
	struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;

	if (test_bit(AF_DEGRADED_MODE, &a->flags))
		return false;

	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
	ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
	ioctl->csmi.target_id = cpu_to_le16(target_id);
	ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);

	/*
	 * Always usurp the completion callback since the interrupt callback
	 * mechanism may be used.
	 */
	rq->aux_req_cx = ci;
	rq->aux_req_cb = rq->comp_cb;
	rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;

	if (!esas2r_build_sg_list(a, rq, sgc))
		return false;

	esas2r_start_request(a, rq);
	return true;
}

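/*
 * Validate a SCSI LUN for CSMI address lookups: every byte other than
 * byte 1 (the LUN number itself) must be zero.
 */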
static bool check_lun(struct scsi_lun lun)
{
	bool result;

	result = ((lun.scsi_lun[7] == 0) &&
		  (lun.scsi_lun[6] == 0) &&
		  (lun.scsi_lun[5] == 0) &&
		  (lun.scsi_lun[4] == 0) &&
		  (lun.scsi_lun[3] == 0) &&
		  (lun.scsi_lun[2] == 0) &&
		  /* Byte 1 is intentionally skipped */
		  (lun.scsi_lun[0] == 0));

	return result;
}

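/*
 * Buffered ioctl callback for CSMI requests.  Control codes that can be
 * serviced locally are answered here; the rest are tunneled to the back
 * end.  Returns true only when a tunneled request has been started.
 */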
static int csmi_ioctl_callback(struct esas2r_adapter *a,
			       struct esas2r_request *rq,
			       struct esas2r_sg_context *sgc, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;
	u8 path = 0;
	u8 tid = 0;
	u8 lun = 0;
	u32 sts = CSMI_STS_SUCCESS;
	struct esas2r_target *t;
	unsigned long flags;

	if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
		struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;

		path = gda->path_id;
		tid = gda->target_id;
		lun = gda->lun;
	} else if (ci->control_code == CSMI_CC_TASK_MGT) {
		struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;

		path = tm->path_id;
		tid = tm->target_id;
		lun = tm->lun;
	}

	if (path > 0) {
		rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
			CSMI_STS_INV_PARAM);
		return false;
	}

	rq->target_id = tid;
	rq->vrq->scsi.flags |= cpu_to_le32(lun);

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;

		strcpy(gdi->description, esas2r_get_model_name(a));
		gdi->csmi_major_rev = CSMI_MAJOR_REV;
		gdi->csmi_minor_rev = CSMI_MINOR_REV;
		break;
	}

	case CSMI_CC_GET_CNTLR_CFG:
	{
		struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;

		gcc->base_io_addr = 0;
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
				      &gcc->base_memaddr_lo);
		pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
				      &gcc->base_memaddr_hi);
		gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
					  a->pcid->subsystem_vendor);
		gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
		gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
		gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
		gcc->pci_addr.bus_num = a->pcid->bus->number;
		gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
		gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);

		memset(gcc->serial_num, 0, sizeof(gcc->serial_num));

		gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
		gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
		gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
		gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
		gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
		gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
		gcc->bios_build_rev = LOWORD(a->flash_ver);

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
					   | CSMI_CNTLRF_SATA_HBA;
		else
			gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
					   | CSMI_CNTLRF_SATA_RAID;

		gcc->rrom_major_rev = 0;
		gcc->rrom_minor_rev = 0;
		gcc->rrom_build_rev = 0;
		gcc->rrom_release_rev = 0;
		gcc->rrom_biosmajor_rev = 0;
		gcc->rrom_biosminor_rev = 0;
		gcc->rrom_biosbuild_rev = 0;
		gcc->rrom_biosrelease_rev = 0;
		break;
	}

	case CSMI_CC_GET_CNTLR_STS:
	{
		struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;

		if (test_bit(AF_DEGRADED_MODE, &a->flags))
			gcs->status = CSMI_CNTLR_STS_FAILED;
		else
			gcs->status = CSMI_CNTLR_STS_GOOD;

		gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
		break;
	}

	case CSMI_CC_FW_DOWNLOAD:
	case CSMI_CC_GET_RAID_INFO:
	case CSMI_CC_GET_RAID_CFG:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;

	case CSMI_CC_SMP_PASSTHRU:
	case CSMI_CC_SSP_PASSTHRU:
	case CSMI_CC_STP_PASSTHRU:
	case CSMI_CC_GET_PHY_INFO:
	case CSMI_CC_SET_PHY_INFO:
	case CSMI_CC_GET_LINK_ERRORS:
	case CSMI_CC_GET_SATA_SIG:
	case CSMI_CC_GET_CONN_INFO:
	case CSMI_CC_PHY_CTRL:

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       ESAS2R_TARG_ID_INV)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		struct scsi_lun lun;

		memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));

		if (!check_lun(lun)) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		/* make sure the device is present */
		spin_lock_irqsave(&a->mem_lock, flags);
		t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
		spin_unlock_irqrestore(&a->mem_lock, flags);

		if (t == NULL) {
			sts = CSMI_STS_NO_SCSI_ADDR;
			break;
		}

		gsa->host_index = 0xFF;
		gsa->lun = gsa->sas_lun[1];
		rq->target_id = esas2r_targ_get_id(t, a);
		break;
	}

	case CSMI_CC_GET_DEV_ADDR:
	{
		struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || t->sas_addr == 0) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		/* fill in the result */
		*(u64 *)gda->sas_addr = t->sas_addr;
		memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
		gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
		break;
	}

	case CSMI_CC_TASK_MGT:

		/* make sure the target is present */
		t = a->targetdb + rq->target_id;

		if (t >= a->targetdb_end
		    || t->target_state != TS_PRESENT
		    || !(t->flags & TF_PASS_THRU)) {
			sts = CSMI_STS_NO_DEV_ADDR;
			break;
		}

		if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
				       ci->control_code,
				       t->phys_targ_id)) {
			sts = CSMI_STS_FAILED;
			break;
		}

		return true;

	default:

		sts = CSMI_STS_BAD_CTRL_CODE;
		break;
	}

	rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);

	return false;
}

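/*
 * Done callback for CSMI buffered ioctls: copy per-control-code results
 * and the final CSMI status back into the caller's structure.
 */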
static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
				     struct esas2r_request *rq, void *context)
{
	struct atto_csmi *ci = (struct atto_csmi *)context;
	union atto_ioctl_csmi *ioctl_csmi =
		(union atto_ioctl_csmi *)esas2r_buffered_ioctl;

	switch (ci->control_code) {
	case CSMI_CC_GET_DRVR_INFO:
	{
		struct atto_csmi_get_driver_info *gdi =
			&ioctl_csmi->drvr_info;

		strcpy(gdi->name, ESAS2R_VERSION_STR);

		gdi->major_rev = ESAS2R_MAJOR_REV;
		gdi->minor_rev = ESAS2R_MINOR_REV;
		gdi->build_rev = 0;
		gdi->release_rev = 0;
		break;
	}

	case CSMI_CC_GET_SCSI_ADDR:
	{
		struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;

		if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
		    CSMI_STS_SUCCESS) {
			gsa->target_id = rq->target_id;
			gsa->path_id = 0;
		}

		break;
	}
	}

	ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
}

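/* Handle a CSMI ioctl by running it through the buffered ioctl path. */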
static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
{
	struct esas2r_buffered_ioctl bi;

	memset(&bi, 0, sizeof(bi));

	bi.a = a;
	bi.ioctl = &ci->data;
	bi.length = sizeof(union atto_ioctl_csmi);
	bi.offset = 0;
	bi.callback = csmi_ioctl_callback;
	bi.context = ci;
	bi.done_callback = csmi_ioctl_done_callback;
	bi.done_context = ci;

	return handle_buffered_ioctl(&bi);
}

/* ATTO HBA ioctl support */

/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
			     struct atto_ioctl *hi,
			     struct esas2r_request *rq,
			     struct esas2r_sg_context *sgc)
{
	esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);

	esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);

	if (!esas2r_build_sg_list(a, rq, sgc)) {
		hi->status = ATTO_STS_OUT_OF_RSRC;

		return false;
	}

	esas2r_start_request(a, rq);

	return true;
}

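/*
 * Completion callback for SCSI pass-through requests: translate the request
 * status into an ATTO_SPT_RS_* code, advance the target ID to the next
 * present target and call the original completion callback.
 */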
static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
				  struct esas2r_request *rq)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
	struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
	u8 sts = ATTO_SPT_RS_FAILED;

	spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
	spt->sense_length = rq->sense_len;
	spt->residual_length =
		le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);

	switch (rq->req_stat) {
	case RS_SUCCESS:
	case RS_SCSI_ERROR:
		sts = ATTO_SPT_RS_SUCCESS;
		break;
	case RS_UNDERRUN:
		sts = ATTO_SPT_RS_UNDERRUN;
		break;
	case RS_OVERRUN:
		sts = ATTO_SPT_RS_OVERRUN;
		break;
	case RS_SEL:
	case RS_SEL2:
		sts = ATTO_SPT_RS_NO_DEVICE;
		break;
	case RS_NO_LUN:
		sts = ATTO_SPT_RS_NO_LUN;
		break;
	case RS_TIMEOUT:
		sts = ATTO_SPT_RS_TIMEOUT;
		break;
	case RS_DEGRADED:
		sts = ATTO_SPT_RS_DEGRADED;
		break;
	case RS_BUSY:
		sts = ATTO_SPT_RS_BUSY;
		break;
	case RS_ABORTED:
		sts = ATTO_SPT_RS_ABORTED;
		break;
	case RS_RESET:
		sts = ATTO_SPT_RS_BUS_RESET;
		break;
	}

	spt->req_status = sts;

	/* Update the target ID to the next one present. */
	spt->target_id =
		esas2r_targ_db_find_next_present(a, (u16)spt->target_id);

	/* Done, call the completion callback. */
	(*rq->aux_req_cb)(a, rq);
}

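/* Buffered ioctl callback for ATTO HBA ioctls; dispatches on hi->function. */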
static int hba_ioctl_callback(struct esas2r_adapter *a,
			      struct esas2r_request *rq,
			      struct esas2r_sg_context *sgc,
			      void *context)
{
	struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;

	hi->status = ATTO_STS_SUCCESS;

	switch (hi->function) {
	case ATTO_FUNC_GET_ADAP_INFO:
	{
		u8 *class_code = (u8 *)&a->pcid->class;

		struct atto_hba_get_adapter_info *gai =
			&hi->data.get_adap_info;

		if (hi->flags & HBAF_TUNNEL) {
			hi->status = ATTO_STS_UNSUPPORTED;
			break;
		}

		if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
			hi->status = ATTO_STS_INV_VERSION;
			hi->version = ATTO_VER_GET_ADAP_INFO0;
			break;
		}

		memset(gai, 0, sizeof(*gai));

		gai->pci.vendor_id = a->pcid->vendor;
		gai->pci.device_id = a->pcid->device;
		gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
		gai->pci.ss_device_id = a->pcid->subsystem_device;
		gai->pci.class_code[0] = class_code[0];
		gai->pci.class_code[1] = class_code[1];
		gai->pci.class_code[2] = class_code[2];
		gai->pci.rev_id = a->pcid->revision;
		gai->pci.bus_num = a->pcid->bus->number;
		gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
		gai->pci.func_num = PCI_FUNC(a->pcid->devfn);

		if (pci_is_pcie(a->pcid)) {
			u16 stat;
			u32 caps;

			pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA,
						  &stat);
			pcie_capability_read_dword(a->pcid, PCI_EXP_LNKCAP,
						   &caps);

			gai->pci.link_speed_curr =
				(u8)(stat & PCI_EXP_LNKSTA_CLS);
			gai->pci.link_speed_max =
				(u8)(caps & PCI_EXP_LNKCAP_SLS);
			gai->pci.link_width_curr =
				(u8)((stat & PCI_EXP_LNKSTA_NLW)
				     >> PCI_EXP_LNKSTA_NLW_SHIFT);
			gai->pci.link_width_max =
				(u8)((caps & PCI_EXP_LNKCAP_MLW)
				     >> 4);
		}

		gai->pci.msi_vector_cnt = 1;

		if (a->pcid->msix_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
		else if (a->pcid->msi_enabled)
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
		else
			gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;

		gai->adap_type = ATTO_GAI_AT_ESASRAID2;

		if (test_bit(AF2_THUNDERLINK, &a->flags2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 			gai->adap_type = ATTO_GAI_AT_TLSASHBA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		if (test_bit(AF_DEGRADED_MODE, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 				   ATTO_GAI_AF_DEVADDR_SUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 		if (a->pcid->subsystem_device == ATTO_ESAS_R60F
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		    || a->pcid->subsystem_device == ATTO_ESAS_R608
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		    || a->pcid->subsystem_device == ATTO_ESAS_R644
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		    || a->pcid->subsystem_device == ATTO_TSSC_3808E)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		gai->num_ports = ESAS2R_NUM_PHYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		gai->num_phys = ESAS2R_NUM_PHYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 		strcpy(gai->firmware_rev, a->fw_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		strcpy(gai->flash_rev, a->flash_rev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		strcpy(gai->model_name, esas2r_get_model_name(a));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		gai->num_targets = ESAS2R_MAX_TARGETS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		gai->num_busses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		gai->num_targsper_bus = gai->num_targets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		gai->num_lunsper_targ = 256;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		    || a->pcid->subsystem_device == ATTO_ESAS_R60F)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 			gai->num_connectors = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 			gai->num_connectors = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		gai->num_targets_backend = a->num_targets_backend;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		gai->tunnel_flags = a->ioctl_tunnel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 				    & (ATTO_GAI_TF_MEM_RW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 				       | ATTO_GAI_TF_TRACE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 				       | ATTO_GAI_TF_SCSI_PASS_THRU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 				       | ATTO_GAI_TF_GET_DEV_ADDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 				       | ATTO_GAI_TF_PHY_CTRL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 				       | ATTO_GAI_TF_CONN_CTRL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 				       | ATTO_GAI_TF_GET_DEV_INFO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	case ATTO_FUNC_GET_ADAP_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		struct atto_hba_get_adapter_address *gaa =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 			&hi->data.get_adap_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		if (hi->flags & HBAF_TUNNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			hi->status = ATTO_STS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			hi->status = ATTO_STS_INV_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			hi->version = ATTO_VER_GET_ADAP_ADDR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		} else if (gaa->addr_type == ATTO_GAA_AT_PORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			   || gaa->addr_type == ATTO_GAA_AT_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			if (gaa->addr_type == ATTO_GAA_AT_PORT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			    && gaa->port_id >= ESAS2R_NUM_PHYS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 				hi->status = ATTO_STS_NOT_APPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 				memcpy((u64 *)gaa->address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 				       &a->nvram->sas_addr[0], sizeof(u64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 				gaa->addr_len = sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			hi->status = ATTO_STS_INV_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	case ATTO_FUNC_MEM_RW:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 		if (hi->flags & HBAF_TUNNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		hi->status = ATTO_STS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	case ATTO_FUNC_TRACE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		struct atto_hba_trace *trc = &hi->data.trace;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		if (hi->flags & HBAF_TUNNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		if (hi->version > ATTO_VER_TRACE1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 			hi->status = ATTO_STS_INV_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 			hi->version = ATTO_VER_TRACE1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		    && hi->version >= ATTO_VER_TRACE1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 			if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 				u32 len = hi->data_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 				u32 offset = trc->current_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 				u32 total_len = ESAS2R_FWCOREDUMP_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 				/* Size is zero if a core dump isn't present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 				if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 					total_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 				if (len > total_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 					len = total_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 				if (offset >= total_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 				    || offset + len > total_len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 				    || len == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 					hi->status = ATTO_STS_INV_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 				memcpy(trc + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 				       a->fw_coredump_buff + offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 				       len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 				hi->data_length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			} else if (trc->trace_func == ATTO_TRC_TF_RESET) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 				memset(a->fw_coredump_buff, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 				       ESAS2R_FWCOREDUMP_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 				clear_bit(AF2_COREDUMP_SAVED, &a->flags2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			} else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 				hi->status = ATTO_STS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			/* Always return all the info we can. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			trc->trace_mask = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			trc->current_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			trc->total_length = ESAS2R_FWCOREDUMP_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			/* Return zero length buffer if core dump not present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			if (!test_bit(AF2_COREDUMP_SAVED, &a->flags2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 				trc->total_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			hi->status = ATTO_STS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	case ATTO_FUNC_SCSI_PASS_THRU:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		struct scsi_lun lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		memcpy(&lun, spt->lun, sizeof(struct scsi_lun));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		if (hi->flags & HBAF_TUNNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			hi->status = ATTO_STS_INV_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			hi->version = ATTO_VER_SCSI_PASS_THRU0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 			hi->status = ATTO_STS_INV_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		esas2r_sgc_init(sgc, a, rq, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		sgc->length = hi->data_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 				   + sizeof(struct atto_hba_scsi_pass_thru);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		/* Finish request initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		rq->target_id = (u16)spt->target_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 		rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		rq->sense_len = spt->sense_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		rq->sense_buf = (u8 *)spt->sense_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		/* NOTE: we ignore spt->timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		 * always usurp the completion callback since the interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 		 * callback mechanism may be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		rq->aux_req_cx = hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		rq->aux_req_cb = rq->comp_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		rq->comp_cb = scsi_passthru_comp_cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		if (spt->flags & ATTO_SPTF_DATA_IN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		} else if (spt->flags & ATTO_SPTF_DATA_OUT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 			if (sgc->length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 				hi->status = ATTO_STS_INV_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 		if (spt->flags & ATTO_SPTF_ORDERED_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 			rq->vrq->scsi.flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 				cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		if (!esas2r_build_sg_list(a, rq, sgc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			hi->status = ATTO_STS_OUT_OF_RSRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 		esas2r_start_request(a, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	case ATTO_FUNC_GET_DEV_ADDR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		struct atto_hba_get_device_address *gda =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			&hi->data.get_dev_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		struct esas2r_target *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		if (hi->flags & HBAF_TUNNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 		if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			hi->status = ATTO_STS_INV_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 			hi->version = ATTO_VER_GET_DEV_ADDR0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		if (gda->target_id >= ESAS2R_MAX_TARGETS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			hi->status = ATTO_STS_INV_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		t = a->targetdb + (u16)gda->target_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		if (t->target_state != TS_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			hi->status = ATTO_STS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		} else if (gda->addr_type == ATTO_GDA_AT_PORT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			if (t->sas_addr == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 				hi->status = ATTO_STS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 				*(u64 *)gda->address = t->sas_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 				gda->addr_len = sizeof(u64);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		} else if (gda->addr_type == ATTO_GDA_AT_NODE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			hi->status = ATTO_STS_NOT_APPL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			hi->status = ATTO_STS_INV_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		/* update the target ID to the next one present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		gda->target_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 			esas2r_targ_db_find_next_present(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 							 (u16)gda->target_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	case ATTO_FUNC_PHY_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 	case ATTO_FUNC_CONN_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		if (hba_ioctl_tunnel(a, hi, rq, sgc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	case ATTO_FUNC_ADAP_CTRL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		if (hi->flags & HBAF_TUNNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			hi->status = ATTO_STS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		if (hi->version > ATTO_VER_ADAP_CTRL0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			hi->status = ATTO_STS_INV_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			hi->version = ATTO_VER_ADAP_CTRL0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 			esas2r_reset_adapter(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		} else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 			hi->status = ATTO_STS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if (test_bit(AF_CHPRST_NEEDED, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			ac->adap_state = ATTO_AC_AS_RST_SCHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		else if (test_bit(AF_CHPRST_PENDING, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 			ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		else if (test_bit(AF_DISC_PENDING, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 			ac->adap_state = ATTO_AC_AS_RST_DISC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 		else if (test_bit(AF_DISABLED, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			ac->adap_state = ATTO_AC_AS_DISABLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		else if (test_bit(AF_DEGRADED_MODE, &a->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 			ac->adap_state = ATTO_AC_AS_DEGRADED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			ac->adap_state = ATTO_AC_AS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	case ATTO_FUNC_GET_DEV_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		struct esas2r_target *t;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		if (hi->flags & HBAF_TUNNEL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			if (hba_ioctl_tunnel(a, hi, rq, sgc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 				return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		if (hi->version > ATTO_VER_GET_DEV_INFO0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			hi->status = ATTO_STS_INV_VERSION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			hi->version = ATTO_VER_GET_DEV_INFO0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 			hi->status = ATTO_STS_INV_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		t = a->targetdb + (u16)gdi->target_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		/* update the target ID to the next one present. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 		gdi->target_id =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			esas2r_targ_db_find_next_present(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 							 (u16)gdi->target_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		if (t->target_state != TS_PRESENT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			hi->status = ATTO_STS_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		hi->status = ATTO_STS_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		hi->status = ATTO_STS_INV_FUNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
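/*
 * Done-callback for the buffered HBA ioctl.  For ATTO_FUNC_GET_ADAP_INFO it
 * fills in the driver-level fields (driver name and revision, bus/target/LUN
 * counts) that the firmware response does not carry.
 */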
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) static void hba_ioctl_done_callback(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 				    struct esas2r_request *rq, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	struct atto_ioctl *ioctl_hba =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		(struct atto_ioctl *)esas2r_buffered_ioctl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	esas2r_debug("hba_ioctl_done_callback %d", a->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		struct atto_hba_get_adapter_info *gai =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 			&ioctl_hba->data.get_adap_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		gai->drvr_rev_major = ESAS2R_MAJOR_REV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		gai->drvr_rev_minor = ESAS2R_MINOR_REV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		gai->num_busses = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		gai->num_lunsper_targ = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
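/*
 * Entry point for EXPRESS_IOCTL_HBA: wrap the caller's atto_ioctl (header
 * plus data_length bytes of payload) in the buffered-ioctl machinery and
 * route it through hba_ioctl_callback()/hba_ioctl_done_callback().
 */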
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) u8 handle_hba_ioctl(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		    struct atto_ioctl *ioctl_hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	struct esas2r_buffered_ioctl bi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	memset(&bi, 0, sizeof(bi));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 	bi.a = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	bi.ioctl = ioctl_hba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	bi.callback = hba_ioctl_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	bi.context = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	bi.done_callback = hba_ioctl_done_callback;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	bi.done_context = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	bi.offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	return handle_buffered_ioctl(&bi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
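/*
 * Write a new SAS NVRAM image using the supplied internal request and sleep
 * (interruptibly) until the firmware signals completion.  Returns 1 on
 * success and 0 on failure.
 */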
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 			struct esas2r_sas_nvram *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	int result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	a->nvram_command_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	rq->comp_cb = complete_nvr_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	if (esas2r_nvram_write(a, rq, data)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		/* now wait around for it to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		while (!a->nvram_command_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			wait_event_interruptible(a->nvram_waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 						 a->nvram_command_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 		/* done, check the status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		if (rq->req_stat == RS_SUCCESS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 			result = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) /* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) int esas2r_ioctl_handler(void *hostdata, unsigned int cmd, void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	struct atto_express_ioctl *ioctl = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	struct esas2r_adapter *a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	struct esas2r_request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	u16 code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	if ((arg == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	    || (cmd < EXPRESS_IOCTL_MIN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	    || (cmd > EXPRESS_IOCTL_MAX))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	ioctl = memdup_user(arg, sizeof(struct atto_express_ioctl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	if (IS_ERR(ioctl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		esas2r_log(ESAS2R_LOG_WARN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 			   "ioctl_handler memdup_user failed for cmd %u, address %p",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			   cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		return PTR_ERR(ioctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	/* verify the signature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	if (memcmp(ioctl->header.signature,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		   EXPRESS_IOCTL_SIGNATURE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 		   EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		kfree(ioctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 		return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	/* assume success */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	ioctl->header.return_code = IOCTL_SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	 * handle EXPRESS_IOCTL_GET_CHANNELS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	 * without paying attention to channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		int i = 0, k = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		ioctl->data.chanlist.num_channels = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		while (i < MAX_ADAPTERS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 			if (esas2r_adapters[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				ioctl->data.chanlist.num_channels++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				ioctl->data.chanlist.channel[k] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 				k++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 			i++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		goto ioctl_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	/* get the channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	if (ioctl->header.channel == 0xFF) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		a = (struct esas2r_adapter *)hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		if (ioctl->header.channel >= MAX_ADAPTERS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 			esas2r_adapters[ioctl->header.channel] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 			kfree(ioctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			return -ENOTSUPP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		a = esas2r_adapters[ioctl->header.channel];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	switch (cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	case EXPRESS_IOCTL_RW_FIRMWARE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			err = esas2r_write_fw(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 					      (char *)ioctl->data.fwrw.image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 					      0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 					      sizeof(struct atto_express_ioctl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 			if (err >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 				err = esas2r_read_fw(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 						     (char *)ioctl->data.fwrw.image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 						     0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 						     sizeof(struct atto_express_ioctl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 		} else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 			err = esas2r_write_fs(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 					      (char *)ioctl->data.fwrw.image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 					      0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 					      sizeof(struct atto_express_ioctl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 			if (err >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 				err = esas2r_read_fs(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 						     (char *)ioctl->data.fwrw.image,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 						     0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 						     sizeof(struct atto_express_ioctl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	case EXPRESS_IOCTL_READ_PARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		memcpy(ioctl->data.prw.data_buffer, a->nvram,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		       sizeof(struct esas2r_sas_nvram));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		ioctl->data.prw.code = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	case EXPRESS_IOCTL_WRITE_PARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		rq = esas2r_alloc_request(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		if (rq == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			kfree(ioctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			esas2r_log(ESAS2R_LOG_WARN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 			   "could not allocate an internal request");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		code = esas2r_write_params(a, rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 					   (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		ioctl->data.prw.code = code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 		esas2r_free_request(a, rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	case EXPRESS_IOCTL_DEFAULT_PARAMS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		esas2r_nvram_get_defaults(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 					  (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		ioctl->data.prw.code = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	case EXPRESS_IOCTL_CHAN_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 		ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		ioctl->data.chaninfo.IRQ = a->pcid->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 		ioctl->data.chaninfo.device_id = a->pcid->device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 		ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 		ioctl->data.chaninfo.revision_id = a->pcid->revision;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 		ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 		ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		ioctl->data.chaninfo.core_rev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		ioctl->data.chaninfo.host_no = a->host->host_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		ioctl->data.chaninfo.hbaapi_rev = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	case EXPRESS_IOCTL_SMP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 		ioctl->header.return_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 			handle_smp_ioctl(a, &ioctl->data.ioctl_smp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	case EXPRESS_CSMI:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 		ioctl->header.return_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 			handle_csmi_ioctl(a, &ioctl->data.csmi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	case EXPRESS_IOCTL_HBA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		ioctl->header.return_code =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			handle_hba_ioctl(a, &ioctl->data.ioctl_hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	case EXPRESS_IOCTL_VDA:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		err = esas2r_write_vda(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 				       (char *)&ioctl->data.ioctl_vda,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 				       0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 				       sizeof(struct atto_ioctl_vda) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 				       ioctl->data.ioctl_vda.data_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		if (err >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			err = esas2r_read_vda(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 					      (char *)&ioctl->data.ioctl_vda,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 					      0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 					      sizeof(struct atto_ioctl_vda) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 					      ioctl->data.ioctl_vda.data_length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	case EXPRESS_IOCTL_GET_MOD_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		ioctl->data.modinfo.adapter = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		ioctl->data.modinfo.pci_dev = a->pcid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		ioctl->data.modinfo.scsi_host = a->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		ioctl->data.modinfo.host_no = a->host->host_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 		esas2r_debug("esas2r_ioctl invalid cmd %u!", cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		ioctl->header.return_code = IOCTL_ERR_INVCMD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) ioctl_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	if (err < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 		esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %u", err,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 			   cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 		switch (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 		case -ENOMEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 		case -EBUSY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 		case -ENOSYS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 		case -EINVAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 			ioctl->header.return_code = IOCTL_INVALID_PARAM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	/* Always copy the buffer back, if only to pick up the status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	err = copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	if (err != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 		esas2r_log(ESAS2R_LOG_WARN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			   "ioctl_handler copy_to_user didn't copy everything (err %d, cmd %u)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 			   err, cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		kfree(ioctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	kfree(ioctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
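/*
 * esas2r_ioctl() below serves as the SCSI host's ->ioctl handler, so
 * ATTO-specific commands issued against a SCSI device owned by this adapter
 * are routed into esas2r_ioctl_handler() above.  Minimal userspace sketch
 * (illustrative only; the structure layout comes from the ATTO management
 * headers, and fd is assumed to be such a device node):
 *
 *	struct atto_express_ioctl req = { };
 *
 *	memcpy(req.header.signature, EXPRESS_IOCTL_SIGNATURE,
 *	       EXPRESS_IOCTL_SIGNATURE_SIZE);
 *	req.header.channel = 0xFF;	(0xFF selects the adapter backing fd)
 *
 *	if (ioctl(fd, EXPRESS_IOCTL_GET_CHANNELS, &req) == 0 &&
 *	    req.header.return_code == IOCTL_SUCCESS)
 *		printf("%u channels\n", req.data.chanlist.num_channels);
 */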
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) int esas2r_ioctl(struct scsi_device *sd, unsigned int cmd, void __user *arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
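/*
 * free_fw_buffers()/allocate_fw_buffers() manage the DMA-coherent staging
 * buffer used to transfer firmware flash images through the FM API.
 * allocate_fw_buffers() returns 1 on success and 0 if the allocation fails.
 */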
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) static void free_fw_buffers(struct esas2r_adapter *a)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	if (a->firmware.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		dma_free_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 				  (size_t)a->firmware.orig_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 				  a->firmware.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 				  (dma_addr_t)a->firmware.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 		a->firmware.data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	free_fw_buffers(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	a->firmware.orig_len = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 					      (size_t)length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 					      (dma_addr_t *)&a->firmware.phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	if (!a->firmware.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 		esas2r_debug("buffer alloc failed!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) /* Handle a call to read firmware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	esas2r_trace_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	/* if the cached header is a status, simply copy it over and return. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	if (a->firmware.state == FW_STATUS_ST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		int size = min_t(int, count, sizeof(a->firmware.header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		esas2r_trace_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 		memcpy(buf, &a->firmware.header, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 		esas2r_debug("esas2r_read_fw: STATUS size %d", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	 * If the cached header is a command, execute it when the read starts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	 * at offset 0; otherwise copy out the buffered pieces.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	if (a->firmware.state == FW_COMMAND_ST) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 		u32 length = a->firmware.header.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 		esas2r_trace_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		esas2r_debug("esas2r_read_fw: COMMAND length %u off %ld",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 			     length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			     off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 		if (off == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 			if (a->firmware.header.action == FI_ACT_UP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 				if (!allocate_fw_buffers(a, length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 					return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 				/* copy header over */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 				memcpy(a->firmware.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 				       &a->firmware.header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 				       sizeof(a->firmware.header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 				do_fm_api(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 					  (struct esas2r_flash_img *)a->firmware.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 			} else if (a->firmware.header.action == FI_ACT_UPSZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 				int size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 					min_t(int, count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 					      sizeof(a->firmware.header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 				do_fm_api(a, &a->firmware.header);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 				memcpy(buf, &a->firmware.header, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 				esas2r_debug("FI_ACT_UPSZ size %d", size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 				return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 				esas2r_debug("invalid action %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 					     a->firmware.header.action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 				return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 		if (count + off > length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 			count = length - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 		if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		if (!a->firmware.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 			esas2r_debug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 				"read: nonzero offset but no buffer available!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		esas2r_debug("esas2r_read_fw: off %ld count %d length %u", off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 			     count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 			     length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		memcpy(buf, &a->firmware.data[off], count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 		/* when done, release the buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		if (length <= off + count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 			esas2r_debug("esas2r_read_fw: freeing buffer!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 			free_fw_buffers(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 		return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	esas2r_trace_exit();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	esas2r_debug("esas2r_read_fw: invalid firmware state %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		     a->firmware.state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) /* Handle a call to write firmware. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		    int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	u32 length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	if (off == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		struct esas2r_flash_img *header =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 			(struct esas2r_flash_img *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		/* assume version 0 flash image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		int min_size = sizeof(struct esas2r_flash_img_v0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		a->firmware.state = FW_INVALID_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 		/* validate the version field first */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		if (count < 4 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		    header->fi_version > FI_VERSION_1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 			esas2r_debug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 				"esas2r_write_fw: short header or invalid version");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		/* See if it's a version 1 flash image */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 		if (header->fi_version == FI_VERSION_1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			min_size = sizeof(struct esas2r_flash_img);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		/* If this is the start, the header must be full and valid. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 		if (count < min_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 			esas2r_debug("esas2r_write_fw: short header, aborting");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		/* Make sure the size is reasonable. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 		length = header->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 		if (length > 1024 * 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			esas2r_debug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 				"esas2r_write_fw: hosed, length %u fi_version %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 				length, header->fi_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 		 * If this is a download (write) command, allocate memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		 * because we have to cache everything.  Otherwise, just cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		 * the header, because the read op will issue the command.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 		if (header->action == FI_ACT_DOWN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			if (!allocate_fw_buffers(a, length))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			 * Store the command, so there is context on subsequent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 			 * calls.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 			memcpy(&a->firmware.header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			       buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 			       sizeof(*header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		} else if (header->action == FI_ACT_UP ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 			   header->action == FI_ACT_UPSZ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			/* Save the command, result will be picked up on read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 			memcpy(&a->firmware.header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 			       buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 			       sizeof(*header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			a->firmware.state = FW_COMMAND_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 			esas2r_debug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 				"esas2r_write_fw: COMMAND, count %d, action %d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 				count, header->action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 			 * Pretend we took the whole buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 			 * so we don't get bothered again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 			return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 			esas2r_debug("esas2r_write_fw: invalid action %d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 				     a->firmware.header.action);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 			return -ENOSYS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 		length = a->firmware.header.length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 	 * We only get here on a download command, regardless of offset.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	 * The chunks written by the system need to be cached, and when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	 * the final one arrives, the fmapi command is issued.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	if (off + count > length)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		count = length - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	if (count > 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		esas2r_debug("esas2r_write_fw: off %ld count %d length %u", off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			     count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 			     length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		 * On a full upload, the system tries sending the whole buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		 * There is nothing to do with it, so just drop it here before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		 * trying to copy it into unallocated memory!
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 		if (a->firmware.header.action == FI_ACT_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 			return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		if (!a->firmware.data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 			esas2r_debug(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 				"write: nonzero offset but no buffer available!");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 		memcpy(&a->firmware.data[off], buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 		if (length == off + count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 			do_fm_api(a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 				  (struct esas2r_flash_img *)a->firmware.data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 			 * Now copy the header result to be picked up by the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			 * next read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 			memcpy(&a->firmware.header,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 			       a->firmware.data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 			       sizeof(a->firmware.header));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 			a->firmware.state = FW_STATUS_ST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 			esas2r_debug("write completed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 			 * Since the system has the data buffered, the only way
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 			 * this can leak is if a root user writes a program
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			 * that writes a shorter buffer than it claims, and the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 			 * copyin fails.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 			free_fw_buffers(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) }
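
/*
 * Editor's note (illustrative only): esas2r_read_fw()/esas2r_write_fw() above
 * implement a chunked write-then-read protocol that, by all appearances,
 * backs a binary attribute registered elsewhere in the driver; the sysfs path
 * used below is therefore an assumption, not taken from this file.  A
 * FI_ACT_DOWN image is written in offset-ordered chunks; once the final chunk
 * arrives the flash command runs and the resulting header/status can be read
 * back.
 */
#if 0	/* user-space sketch, not part of the kernel build */
#include <fcntl.h>
#include <unistd.h>

static int example_flash_download(const void *img, size_t len)
{
	/* assumed attribute path; the real name is driver-defined */
	int fd = open("/sys/class/scsi_host/host0/fw", O_RDWR);
	char status[128];
	size_t done = 0;
	int ret = -1;

	if (fd < 0)
		return -1;
	while (done < len) {		/* each chunk hits esas2r_write_fw() */
		ssize_t n = pwrite(fd, (const char *)img + done,
				   len - done, done);
		if (n <= 0)
			goto out;
		done += n;
	}
	if (pread(fd, status, sizeof(status), 0) < 0)	/* esas2r_read_fw() */
		goto out;
	ret = 0;
out:
	close(fd);
	return ret;
}
#endif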
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) /* Callback for the completion of a VDA request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) static void vda_complete_req(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 			     struct esas2r_request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	a->vda_command_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 	wake_up_interruptible(&a->vda_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) /* Scatter/gather callback for VDA requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 	int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	(*addr) = a->ppvda_buffer + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	return VDA_MAX_BUFFER_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) }
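
/*
 * Note: get_physaddr_vda() (and get_physaddr_fs_api() further below) follow
 * the driver's scatter/gather callback contract: given the current offset
 * into the staging buffer, return the matching DMA/bus address through *addr
 * and report how many contiguous bytes remain from that point.
 */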
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) /* Handle a call to read a VDA command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	if (!a->vda_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	if (off == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 		struct esas2r_request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		struct atto_ioctl_vda *vi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 			(struct atto_ioctl_vda *)a->vda_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		struct esas2r_sg_context sgc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 		bool wait_for_completion;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		 * Presumably, someone has already written to the vda_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		 * and now they are reading the node for the response, so now
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		 * we will actually issue the request to the chip and reply.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		/* allocate a request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		rq = esas2r_alloc_request(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		if (rq == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			esas2r_debug("esas2r_read_vda: out of requests");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 		rq->comp_cb = vda_complete_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		sgc.first_req = rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		sgc.adapter = a;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		a->vda_command_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		wait_for_completion =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			esas2r_process_vda_ioctl(a, vi, rq, &sgc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		if (wait_for_completion) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			/* now wait around for it to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 			while (!a->vda_command_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 				wait_event_interruptible(a->vda_waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 							 a->vda_command_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 		esas2r_free_request(a, (struct esas2r_request *)rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	if (off > VDA_MAX_BUFFER_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	if (count + off > VDA_MAX_BUFFER_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 		count = VDA_MAX_BUFFER_SIZE - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	memcpy(buf, a->vda_buffer + off, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) /* Handle a call to write a VDA command. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		     int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	 * Allocate memory for it, if not already done.  Once allocated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	 * we will keep it around until the driver is unloaded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	if (!a->vda_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 						   (size_t)VDA_MAX_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 						   &dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 						   GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		a->ppvda_buffer = dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 	if (!a->vda_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	if (off > VDA_MAX_BUFFER_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 	if (count + off > VDA_MAX_BUFFER_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 		count = VDA_MAX_BUFFER_SIZE - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	if (count < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	memcpy(a->vda_buffer + off, buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) }
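
/*
 * Editor's note (illustrative only): the VDA pair above uses the same
 * write-then-read convention as the firmware pair: the command buffer is
 * written first, and a subsequent read starting at offset 0 actually issues
 * it to the firmware and returns the updated buffer.  The sysfs path below is
 * an assumption, as above.
 */
#if 0	/* user-space sketch, not part of the kernel build */
#include <fcntl.h>
#include <unistd.h>

static int example_vda_command(void *cmd, size_t cmd_len,
			       void *resp, size_t resp_len)
{
	int fd = open("/sys/class/scsi_host/host0/vda", O_RDWR); /* assumed */
	int ret = -1;

	if (fd < 0)
		return -1;
	if (pwrite(fd, cmd, cmd_len, 0) < 0)	/* esas2r_write_vda() */
		goto out;
	if (pread(fd, resp, resp_len, 0) < 0)	/* esas2r_read_vda() */
		goto out;
	ret = 0;
out:
	close(fd);
	return ret;
}
#endif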
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) /* Callback for the completion of an FS_API request. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) static void fs_api_complete_req(struct esas2r_adapter *a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 				struct esas2r_request *rq)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	a->fs_api_command_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	wake_up_interruptible(&a->fs_api_waiter);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) /* Scatter/gather callback for FS_API requests */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 	struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 	struct esas2r_ioctl_fs *fs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		(struct esas2r_ioctl_fs *)a->fs_api_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	(*addr) = a->ppfs_api_buffer + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	return a->fs_api_buffer_size - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) /* Handle a call to read firmware via FS_API. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (!a->fs_api_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	if (off == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		struct esas2r_request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		struct esas2r_sg_context sgc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		struct esas2r_ioctl_fs *fs =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 			(struct esas2r_ioctl_fs *)a->fs_api_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		/* If waiting for another in-progress flash request is interrupted, report busy. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		if (mutex_lock_interruptible(&a->fs_api_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) busy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 			fs->status = ATTO_STS_OUT_OF_RSRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 		 * Presumably, someone has already written to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		 * fs_api_buffer, and now they are reading the node for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		 * response, so now we will actually issue the request to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		 * chip and reply.  Allocate a request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 		rq = esas2r_alloc_request(a);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		if (rq == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			esas2r_debug("esas2r_read_fs: out of requests");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			mutex_unlock(&a->fs_api_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 			goto busy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		rq->comp_cb = fs_api_complete_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		/* Set up the SGCONTEXT to build the s/g table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		sgc.cur_offset = fs->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 		sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 		a->fs_api_command_done = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 			if (fs->status == ATTO_STS_OUT_OF_RSRC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 				count = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 			goto dont_wait;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		/* Now wait around for it to complete. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		while (!a->fs_api_command_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 			wait_event_interruptible(a->fs_api_waiter,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 						 a->fs_api_command_done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) dont_wait:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		/* Free the request and keep going */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 		mutex_unlock(&a->fs_api_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		esas2r_free_request(a, (struct esas2r_request *)rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 		/* Pick up possible error code from above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 		if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 			return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if (off > a->fs_api_buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	if (count + off > a->fs_api_buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 		count = a->fs_api_buffer_size - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	if (count < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	memcpy(buf, a->fs_api_buffer + off, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) /* Handle a call to write firmware via FS_API. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 		    int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	if (off == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		u32 length = fs->command.length +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 			     offsetof(struct esas2r_ioctl_fs, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		 * Special case, for BEGIN commands, the length field
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		 * is lying to us, so just get enough for the header.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 			length = offsetof(struct esas2r_ioctl_fs, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		 * Beginning a command.  We assume we'll get at least
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		 * enough in the first write so we can look at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		 * header and see how much we need to alloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		if (count < offsetof(struct esas2r_ioctl_fs, data))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 		/* Allocate a buffer or use the existing buffer. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		if (a->fs_api_buffer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 			if (a->fs_api_buffer_size < length) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 				/* Free too-small buffer and get a new one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 				dma_free_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 						  (size_t)a->fs_api_buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 						  a->fs_api_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 						  (dma_addr_t)a->ppfs_api_buffer);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 				goto re_allocate_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) re_allocate_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 			a->fs_api_buffer_size = length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 			a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 							      (size_t)a->fs_api_buffer_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 							      (dma_addr_t *)&a->ppfs_api_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 							      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	if (!a->fs_api_buffer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	if (off > a->fs_api_buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	if (count + off > a->fs_api_buffer_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		count = a->fs_api_buffer_size - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	if (count < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	memcpy(a->fs_api_buffer + off, buf, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) }
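
/*
 * Usage note for the FS_API pair above: it follows the same write-then-read
 * contract as the VDA path.  esas2r_write_fs() sizes (or resizes) the DMA
 * staging buffer from the esas2r_ioctl_fs header and caches the payload; a
 * later read at offset 0 issues the request under fs_api_mutex and copies the
 * buffer, including the updated fs->status, back to the caller.
 */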