Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.10.0";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

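/*
 * Wait for the ITL firmware to post a request to the inbound queue,
 * then hand it straight back via the outbound queue.  Used as a
 * readiness handshake during initialization.
 */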
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

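/*
 * MV and MVFrey have no such handshake; readiness is probed by sending
 * a NOP message and waiting for its completion.
 */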
static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

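/*
 * Dispatch an ITL completion: the host bit in the tag distinguishes
 * host-allocated SCSI requests from IOP-allocated (ioctl) requests.
 */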
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

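/* Pop and dispatch every completion pending in the ITL outbound queue. */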
static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
						IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

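/*
 * ITL interrupt handler: acknowledge and dispatch outbound message and
 * post-queue interrupts.  Returns nonzero if any work was done.
 */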
static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

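/*
 * Read one 64-bit completion from the MV outbound ring, advancing the
 * tail pointer; returns 0 if the ring is empty.
 */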
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

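/*
 * Post one 64-bit request to the MV inbound ring and ring the inbound
 * doorbell to notify the IOP.
 */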
static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
			&hba->u.mv.regs->inbound_doorbell);
}

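/*
 * MV completion tags pack the request type into bits 5-7 and the
 * request index into bits 8 and up; the low bits carry the result and
 * return-context flags.
 */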
static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag>>8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

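/*
 * MVFrey completion tags keep the request type in the low nibble and
 * the request index in bits 4-11.
 */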
static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
	u32 req_type = _tag & 0xf;
	struct hpt_iop_request_scsi_command *req;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
		break;

	default:
		break;
	}
}

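/*
 * MVFrey interrupt handler: mask PCIe function interrupts while
 * running, acknowledge the doorbell and ISR cause, then walk the
 * outbound completion list until it catches up with the chip's
 * current pointer.
 */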
static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
	u32 _tag, status, cptr, cur_rptr;
	int ret = 0;

	if (hba->initialized)
		writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
		if (status & CPU_TO_F0_DRBL_MSG_BIT) {
			u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
			dprintk("received outbound msg %x\n", msg);
			hptiop_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = readl(&(hba->u.mvfrey.mu->isr_cause));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->isr_cause));
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			cur_rptr = hba->u.mvfrey.outlist_rptr;
			while (cur_rptr != cptr) {
				cur_rptr++;
				if (cur_rptr == hba->u.mvfrey.list_count)
					cur_rptr = 0;

				_tag = hba->u.mvfrey.outlist[cur_rptr].val;
				BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 1;
			}
			hba->u.mvfrey.outlist_rptr = cur_rptr;
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized)
		writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	return ret;
}

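/*
 * Send a request through the ITL inbound queue and poll, driving the
 * interrupt handler by hand, until the IOP writes back the request's
 * context field or the timeout expires.
 */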
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr =
		hba->u.mvfrey.internal_req.req_virt;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

	for (i = 0; i < millisec; i++) {
		iop_intr_mvfrey(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}
	return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
	readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}

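/*
 * Send a message synchronously: interrupts are disabled and the
 * interrupt handler is polled under the host lock until
 * hptiop_message_callback() sets msg_done.
 */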
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->disable_intr(hba);
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	hba->ops->enable_intr(hba);
	return hba->msg_done ? 0 : -1;
}

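/*
 * Read the controller configuration.  On ITL a request slot is
 * borrowed from the inbound queue, filled in through MMIO, sent
 * synchronously, and returned via the outbound queue.
 */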
static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
			info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
		return -1;

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	return 0;
}

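/*
 * Push the host configuration to the controller: each interface builds
 * an IOP_REQUEST_TYPE_SET_CONFIG request and sends it synchronously.
 */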
static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req =
		hba->u.mvfrey.internal_req.req_virt;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

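/*
 * Interrupt enable helpers: turn on the outbound post-queue and
 * message interrupts for each interface family.
 */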
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
	writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

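/*
 * Map one PCI BAR of the adapter.  Returns the ioremap()ed virtual
 * address, or NULL if the BAR is not a memory resource or the mapping
 * fails.
 */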
static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

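/*
 * ITL BAR layout: the IOP registers normally live in BAR0; on 0x44xx
 * devices BAR0 is a PLX bridge window and the IOP lives in BAR2.
 */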
static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;

	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mvfrey.config == NULL)
		return -1;

	hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mvfrey.mu == NULL) {
		iounmap(hba->u.mvfrey.config);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	iounmap(hba->u.mvfrey.config);
	iounmap(hba->u.mvfrey.mu);
}

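/*
 * Handle an incoming message from the IOP.  NOP and RESET_COMM
 * completions may arrive before initialization; a RESET completion
 * wakes up any reset waiters.
 */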
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP ||
		msg == IOPMU_INBOUND_MSG0_RESET_COMM)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	} else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

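/* Request slots are recycled through a simple singly linked free list. */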
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

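/*
 * Complete a finished SCSI request: unmap DMA, translate the IOP
 * result code into a SCSI result, set the residual, and hand the
 * command back to the midlayer.
 */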
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
		goto skip_resid;

	default:
		scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
		break;
	}

	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
	dprintk("scsi_done(%p)\n", scp);
	scp->scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

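/*
 * ITL host-request completion.  With the v2 interface the result bit
 * is carried in the tag itself, so a set bit means success without
 * reading the request back.
 */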
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	struct hpt_iop_request_scsi_command *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	u32 tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	if (hba->iopintf_v2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		req = hba->reqs[tag].req_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		tag = _tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		req = hba->reqs[tag].req_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	hptiop_finish_scsi_req(hba, tag, req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
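/*
 * Editorial note: completion callback for IOP-originated ioctl
 * requests.  Here the tag is the request's byte offset inside the
 * mapped BAR; the ioctl context (a struct hpt_ioctl_k pointer) is
 * reassembled from the 32-bit context/context_hi32 fields, and the
 * output buffer is copied out of MMIO space before the slot is
 * returned to the outbound queue.
 */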
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	struct hpt_iop_request_header __iomem *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	struct hpt_iop_request_ioctl_command __iomem *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	struct hpt_ioctl_k *arg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	req = (struct hpt_iop_request_header __iomem *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 			((unsigned long)hba->u.itl.iop + tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			"result=%d, context=0x%x tag=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 			req, readl(&req->type), readl(&req->result),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 			readl(&req->context), tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	BUG_ON(!readl(&req->result));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	arg = (struct hpt_ioctl_k *)(unsigned long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		(readl(&req->context) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			((u64)readl(&req->context_hi32)<<32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		arg->result = HPT_IOCTL_RESULT_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 		if (arg->outbuf_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 			memcpy_fromio(arg->outbuf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 				arg->outbuf_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		if (arg->bytes_returned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			*arg->bytes_returned = arg->outbuf_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		arg->result = HPT_IOCTL_RESULT_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	arg->done(arg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	writel(tag, &hba->u.itl.iop->outbound_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
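/*
 * Editorial note: shared top-level interrupt handler.  The actual work
 * is delegated to the family-specific ->iop_intr() hook, serialized
 * against the SCSI midlayer by the host lock.
 */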
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) static irqreturn_t hptiop_intr(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	struct hptiop_hba  *hba = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	int  handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 	spin_lock_irqsave(hba->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	handled = hba->ops->iop_intr(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	spin_unlock_irqrestore(hba->host->host_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	return handled;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 
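/*
 * Editorial note: map the command's data buffer with scsi_dma_map()
 * and convert each DMA segment into the firmware's hpt_iopsg format;
 * the final descriptor is marked with ->eot (end of table).  Returns
 * the number of S/G entries, or 0 for commands without data.
 */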
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	struct Scsi_Host *host = scp->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	int idx, nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	nseg = scsi_dma_map(scp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	BUG_ON(nseg < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	if (!nseg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	HPT_SCP(scp)->sgcnt = nseg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	HPT_SCP(scp)->mapped = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			hba->ops->host_phy_flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			cpu_to_le32(1) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	return HPT_SCP(scp)->sgcnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
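/*
 * Editorial note: post a request to an ITL controller by writing its
 * shifted physical address into the inbound queue register.  On v2
 * interfaces, flag bits (IOPMU_QUEUE_REQUEST_SIZE_BIT and
 * IOPMU_QUEUE_ADDR_HOST_BIT) encode the request size class
 * (< 256, < 512, or larger) so the firmware knows how much to fetch.
 */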
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) static void hptiop_post_req_itl(struct hptiop_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 					struct hptiop_request *_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct hpt_iop_request_header *reqhdr = _req->req_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 							(u32)_req->index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	reqhdr->context_hi32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (hba->iopintf_v2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		u32 size, size_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		size = le32_to_cpu(reqhdr->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		if (size < 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		else if (size < 512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 						IOPMU_QUEUE_ADDR_HOST_BIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		writel(_req->req_shifted_phy | size_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 			&hba->u.itl.iop->inbound_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 					&hba->u.itl.iop->inbound_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
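/*
 * Editorial note: post a request to a Marvell-based controller.  The
 * context word packs the request index and type
 * (index << 8 | IOP_REQUEST_TYPE_SCSI_COMMAND << 5), and a 2-bit size
 * class in multiples of 256 bytes is OR'ed into the inbound queue
 * entry alongside the shifted physical address.
 */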
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) static void hptiop_post_req_mv(struct hptiop_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 					struct hptiop_request *_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	struct hpt_iop_request_header *reqhdr = _req->req_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	u32 size, size_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	reqhdr->context = cpu_to_le32(_req->index<<8 |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	reqhdr->context_hi32 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	size = le32_to_cpu(reqhdr->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	if (size <= 256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		size_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	else if (size <= 256*2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		size_bit = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	else if (size <= 256*3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		size_bit = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		size_bit = 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	mv_inbound_write((_req->req_shifted_phy << 5) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
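/*
 * Editorial note: post a request on MVFrey hardware via the in-memory
 * inbound list rather than a queue register.  The write pointer wraps
 * at list_count and flips CL_POINTER_TOGGLE on each wrap (presumably
 * letting the firmware distinguish a full list from an empty one); the
 * trailing readl() forces the pointer update to post.
 */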
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 					struct hptiop_request *_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	struct hpt_iop_request_header *reqhdr = _req->req_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	u32 index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 			IOP_REQUEST_FLAG_ADDR_BITS |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			((_req->req_shifted_phy >> 11) & 0xffff0000));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			(_req->index << 4) | reqhdr->type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 			0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	hba->u.mvfrey.inlist_wptr++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	index = hba->u.mvfrey.inlist_wptr & 0x3fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	if (index == hba->u.mvfrey.list_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 		index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	hba->u.mvfrey.inlist[index].addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 			(dma_addr_t)_req->req_shifted_phy << 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	writel(hba->u.mvfrey.inlist_wptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		&(hba->u.mvfrey.mu->inbound_write_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
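/*
 * Editorial note: after the RESET_COMM message the MVFrey list
 * machinery must be re-armed: the inbound/outbound list bases and the
 * outbound shadow (copy) pointer are reprogrammed, and the ring
 * pointers are reset to list_count - 1 with the toggle bit set.
 */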
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	u32 list_count = hba->u.mvfrey.list_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	/* wait 100ms for MCU ready */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	msleep(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 			&(hba->u.mvfrey.mu->inbound_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			&(hba->u.mvfrey.mu->inbound_base_high));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 			&(hba->u.mvfrey.mu->outbound_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			&(hba->u.mvfrey.mu->outbound_base_high));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			&(hba->u.mvfrey.mu->outbound_shadow_base));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			&(hba->u.mvfrey.mu->outbound_shadow_base_high));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	*hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	hba->u.mvfrey.outlist_rptr = list_count - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
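/*
 * Editorial note: queuecommand implementation (invoked under the host
 * lock via DEF_SCSI_QCMD below).  It pulls a free slot from the hba
 * request list, validates the target address, builds the S/G table and
 * request header in the slot's DMA buffer, and hands the request to
 * the family-specific ->post_req() hook.
 */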
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 				void (*done)(struct scsi_cmnd *))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	struct Scsi_Host *host = scp->device->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	struct hpt_iop_request_scsi_command *req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	int sg_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	struct hptiop_request *_req;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	BUG_ON(!done);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	scp->scsi_done = done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	_req = get_req(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 	if (_req == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		dprintk("hptiop_queuecmd: no free req\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		return SCSI_MLQUEUE_HOST_BUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	_req->scp = scp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			"req_index=%d, req=%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			scp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			host->host_no, scp->device->channel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			scp->device->id, scp->device->lun,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			cpu_to_be32(((u32 *)scp->cmnd)[0]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			cpu_to_be32(((u32 *)scp->cmnd)[1]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			cpu_to_be32(((u32 *)scp->cmnd)[2]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			cpu_to_be32(((u32 *)scp->cmnd)[3]),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			_req->index, _req->req_virt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	scp->result = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	if (scp->device->channel ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			(scp->device->id > hba->max_devices) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		scp->result = DID_BAD_TARGET << 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		free_req(hba, _req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		goto cmd_done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	req = _req->req_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	/* build S/G table */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	sg_count = hptiop_buildsgl(scp, req->sg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	if (!sg_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		HPT_SCP(scp)->mapped = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	req->channel = scp->device->channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	req->target = scp->device->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	req->lun = scp->device->lun;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	req->header.size = cpu_to_le32(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 				sizeof(struct hpt_iop_request_scsi_command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 				 - sizeof(struct hpt_iopsg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 				 + sg_count * sizeof(struct hpt_iopsg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	hba->ops->post_req(hba, _req);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) cmd_done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	dprintk("scsi_done(scp=%p)\n", scp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	scp->scsi_done(scp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static DEF_SCSI_QCMD(hptiop_queuecommand)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static const char *hptiop_info(struct Scsi_Host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	return driver_name_long;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
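/*
 * Editorial note: request an IOP reset.  atomic_xchg() ensures only
 * one caller posts the RESET message while concurrent callers just
 * wait; all of them then sleep (up to 60 s) until the interrupt path
 * clears hba->resetting, after which background tasks are restarted.
 */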
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) static int hptiop_reset_hba(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (atomic_xchg(&hba->resetting, 1) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		atomic_inc(&hba->reset_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	wait_event_timeout(hba->reset_wq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 			atomic_read(&hba->resetting) == 0, 60 * HZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (atomic_read(&hba->resetting)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		/* IOP is in unknown state, abort reset */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	if (iop_send_sync_msg(hba,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		dprintk("scsi%d: failed to start background task\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 				hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static int hptiop_reset(struct scsi_cmnd *scp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	struct hptiop_hba *hba = (struct hptiop_hba *)scp->device->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	       scp->device->host->host_no, -1, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 					  int queue_depth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	if (queue_depth > hba->max_requests)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		queue_depth = hba->max_requests;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	return scsi_change_queue_depth(sdev, queue_depth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) static ssize_t hptiop_show_version(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 				   struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) static ssize_t hptiop_show_fw_version(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 				      struct device_attribute *attr, char *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	struct Scsi_Host *host = class_to_shost(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 				hba->firmware_version >> 24,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 				(hba->firmware_version >> 16) & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 				(hba->firmware_version >> 8) & 0xff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 				hba->firmware_version & 0xff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) static struct device_attribute hptiop_attr_version = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		.name = "driver-version",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		.mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	.show = hptiop_show_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) static struct device_attribute hptiop_attr_fw_version = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	.attr = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		.name = "firmware-version",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		.mode = S_IRUGO,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	.show = hptiop_show_fw_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static struct device_attribute *hptiop_attrs[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	&hptiop_attr_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	&hptiop_attr_fw_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) static int hptiop_slave_config(struct scsi_device *sdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	if (sdev->type == TYPE_TAPE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		blk_queue_max_hw_sectors(sdev->request_queue, 8192);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) static struct scsi_host_template driver_template = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	.module                     = THIS_MODULE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	.name                       = driver_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	.queuecommand               = hptiop_queuecommand,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	.eh_host_reset_handler      = hptiop_reset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	.info                       = hptiop_info,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	.emulated                   = 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	.proc_name                  = driver_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	.shost_attrs                = hptiop_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	.slave_configure            = hptiop_slave_config,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	.this_id                    = -1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	.change_queue_depth         = hptiop_adjust_disk_queue_depth,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	if (hba->u.mv.internal_req)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 
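/*
 * Editorial note: the list size is read from the top half of
 * inbound_conf_ctl, then a single coherent allocation is carved up
 * into an 0x800-byte internal request, the inbound and outbound lists,
 * and a final __le32 shadow copy of the outbound pointer
 * (outlist_cptr).
 */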
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	char *p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	dma_addr_t phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	BUG_ON(hba->max_request_size == 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	if (list_count == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	list_count >>= 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	hba->u.mvfrey.list_count = list_count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	hba->u.mvfrey.internal_mem_size = 0x800 +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 			list_count * sizeof(struct mvfrey_inlist_entry) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 			list_count * sizeof(struct mvfrey_outlist_entry) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 			sizeof(int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	p = dma_alloc_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 			hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (!p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	hba->u.mvfrey.internal_req.req_virt = p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	hba->u.mvfrey.internal_req.scp = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	hba->u.mvfrey.internal_req.next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	p += 0x800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	phy += 0x800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 	hba->u.mvfrey.inlist_phy = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	p += list_count * sizeof(struct mvfrey_inlist_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	phy += list_count * sizeof(struct mvfrey_inlist_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 	hba->u.mvfrey.outlist_phy = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	p += list_count * sizeof(struct mvfrey_outlist_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	phy += list_count * sizeof(struct mvfrey_outlist_entry);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	hba->u.mvfrey.outlist_cptr_phy = phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	if (hba->u.mv.internal_req) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		dma_free_coherent(&hba->pcidev->dev, 0x800,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 	if (hba->u.mvfrey.internal_req.req_virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		dma_free_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 			hba->u.mvfrey.internal_mem_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			hba->u.mvfrey.internal_req.req_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 			(dma_addr_t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 			hba->u.mvfrey.internal_req.req_shifted_phy << 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
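/*
 * Editorial note: PCI probe path.  Order matters here: enable the
 * device and DMA mask, map the BAR(s), wait for firmware, fetch and
 * apply the configuration, register the IRQ, then carve out one
 * 32-byte-aligned coherent buffer per request before initializing the
 * IOP and registering the SCSI host.  The error labels below unwind in
 * reverse order.
 */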
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	struct Scsi_Host *host = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	struct hptiop_hba *hba;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	struct hptiop_adapter_ops *iop_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	struct hpt_iop_request_get_config iop_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	struct hpt_iop_request_set_config set_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	dma_addr_t start_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	void *start_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	u32 offset, i, req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	dprintk("hptiop_probe(%p)\n", pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	if (pci_enable_device(pcidev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		printk(KERN_ERR "hptiop: failed to enable PCI device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		pcidev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	pci_set_master(pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	/* Enable 64bit DMA if possible */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	rc = dma_set_mask(&pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 			  DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		printk(KERN_ERR "hptiop: failed to set dma_mask\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		goto disable_pci_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	if (pci_request_regions(pcidev, driver_name)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		goto disable_pci_device;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		printk(KERN_ERR "hptiop: failed to allocate scsi host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		goto free_pci_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	hba = (struct hptiop_hba *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	memset(hba, 0, sizeof(struct hptiop_hba));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	hba->ops = iop_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	hba->pcidev = pcidev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	hba->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	hba->initialized = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	hba->iopintf_v2 = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	atomic_set(&hba->resetting, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	atomic_set(&hba->reset_count, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	init_waitqueue_head(&hba->reset_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	init_waitqueue_head(&hba->ioctl_wq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	host->max_lun = 128;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	host->max_channel = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	host->io_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	host->n_io_port = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	host->irq = pcidev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	if (hba->ops->map_pci_bar(hba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		goto free_scsi_host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	if (hba->ops->iop_wait_ready(hba, 20000)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		printk(KERN_ERR "scsi%d: firmware not ready\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 				hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		goto unmap_pci_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	if (hba->ops->family == MV_BASED_IOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		if (hba->ops->internal_memalloc(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 				hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			goto unmap_pci_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	if (hba->ops->get_config(hba, &iop_config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 		printk(KERN_ERR "scsi%d: get config failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 				hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		goto unmap_pci_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 				HPTIOP_MAX_REQUESTS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	hba->max_devices = le32_to_cpu(iop_config.max_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	hba->max_request_size = le32_to_cpu(iop_config.request_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	hba->interface_version = le32_to_cpu(iop_config.interface_version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	if (hba->ops->family == MVFREY_BASED_IOP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		if (hba->ops->internal_memalloc(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 				hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 			goto unmap_pci_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		if (hba->ops->reset_comm(hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 			printk(KERN_ERR "scsi%d: reset comm failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 					hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 			goto unmap_pci_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	if (hba->firmware_version > 0x01020000 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 			hba->interface_version > 0x01020000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		hba->iopintf_v2 = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	host->max_id = le32_to_cpu(iop_config.max_devices);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	host->can_queue = le32_to_cpu(iop_config.max_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	host->max_cmd_len = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	req_size = sizeof(struct hpt_iop_request_scsi_command)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		+ sizeof(struct hpt_iopsg) * (hba->max_sg_descriptors - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	if ((req_size & 0x1f) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 		req_size = (req_size + 0x1f) & ~0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	set_config.iop_id = cpu_to_le32(host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	set_config.vbus_id = cpu_to_le16(host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	set_config.max_host_request_size = cpu_to_le16(req_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	if (hba->ops->set_config(hba, &set_config)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 		printk(KERN_ERR "scsi%d: set config failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 				hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		goto unmap_pci_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	pci_set_drvdata(pcidev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 					driver_name, hba)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		printk(KERN_ERR "scsi%d: request irq %d failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 					hba->host->host_no, pcidev->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		goto unmap_pci_bar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	/* Allocate per-request DMA memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	hba->req_size = req_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	hba->req_list = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	for (i = 0; i < hba->max_requests; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		start_virt = dma_alloc_coherent(&pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 					hba->req_size + 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 					&start_phy, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		if (!start_virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 			printk(KERN_ERR "scsi%d: failed to allocate request memory\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 						hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 			goto free_request_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 		hba->dma_coherent[i] = start_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		hba->dma_coherent_handle[i] = start_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		if ((start_phy & 0x1f) != 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 			offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 			start_phy += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 			start_virt += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		hba->reqs[i].next = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		hba->reqs[i].req_virt = start_virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		hba->reqs[i].req_shifted_phy = start_phy >> 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		hba->reqs[i].index = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		free_req(hba, &hba->reqs[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	/* Enable interrupts and start the background task */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	if (hptiop_initialize_iop(hba))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		goto free_request_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	if (scsi_add_host(host, &pcidev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 					hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 		goto free_request_mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	scsi_scan_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	dprintk("scsi%d: hptiop_probe succeeded\n", hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) free_request_mem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	for (i = 0; i < hba->max_requests; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 			dma_free_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 					hba->req_size + 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 					hba->dma_coherent[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 					hba->dma_coherent_handle[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	free_irq(hba->pcidev->irq, hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) unmap_pci_bar:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	hba->ops->internal_memfree(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	hba->ops->unmap_pci_bar(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) free_scsi_host:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) free_pci_regions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	pci_release_regions(pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) disable_pci_device:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	pci_disable_device(pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	dprintk("scsi%d: hptiop_probe failed\n", host ? host->host_no : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) static void hptiop_shutdown(struct pci_dev *pcidev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	struct Scsi_Host *host = pci_get_drvdata(pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	dprintk("hptiop_shutdown(%p)\n", hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	/* stop the iop */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 		printk(KERN_ERR "scsi%d: shutdown of the iop timed out\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 					hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	/* disable all outbound interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	hba->ops->disable_intr(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	u32 int_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	writel(int_mask |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		&hba->u.itl.iop->outbound_intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	readl(&hba->u.itl.iop->outbound_intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	writel(0, &hba->u.mv.regs->outbound_intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	readl(&hba->u.mv.regs->outbound_intmask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	writel(0, &(hba->u.mvfrey.mu->isr_enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	readl(&(hba->u.mvfrey.mu->isr_enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 
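/*
 * Editorial note: device teardown mirrors the probe path: remove the
 * SCSI host, shut down the IOP and its interrupts, release the IRQ and
 * per-request DMA buffers, free internal memory, and unmap/release the
 * PCI resources.
 */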
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) static void hptiop_remove(struct pci_dev *pcidev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	struct Scsi_Host *host = pci_get_drvdata(pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	scsi_remove_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	hptiop_shutdown(pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	free_irq(hba->pcidev->irq, hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	for (i = 0; i < hba->max_requests; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			dma_free_coherent(&hba->pcidev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 					hba->req_size + 0x20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 					hba->dma_coherent[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 					hba->dma_coherent_handle[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	hba->ops->internal_memfree(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	hba->ops->unmap_pci_bar(hba);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	pci_release_regions(hba->pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	pci_set_drvdata(hba->pcidev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	pci_disable_device(hba->pcidev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	scsi_host_put(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
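/*
 * Each controller family (Intel-based IOP, Marvell-based IOP, Marvell
 * "Frey") gets its own hptiop_adapter_ops vtable below.  The PCI id table
 * at the bottom of this file stores a pointer to the matching vtable in
 * its driver_data field; hptiop_probe() (earlier in this file) presumably
 * recovers it with a cast along the lines of:
 *
 *	hba->ops = (struct hptiop_adapter_ops *)id->driver_data;
 *
 * so the rest of the driver dispatches through hba->ops and never
 * branches on the chip family directly.
 */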
static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family            = INTEL_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_itl,
	.internal_memalloc = hptiop_internal_memalloc_itl,
	.internal_memfree  = hptiop_internal_memfree_itl,
	.map_pci_bar       = hptiop_map_pci_bar_itl,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_itl,
	.enable_intr       = hptiop_enable_intr_itl,
	.disable_intr      = hptiop_disable_intr_itl,
	.get_config        = iop_get_config_itl,
	.set_config        = iop_set_config_itl,
	.iop_intr          = iop_intr_itl,
	.post_msg          = hptiop_post_msg_itl,
	.post_req          = hptiop_post_req_itl,
	.hw_dma_bit_mask   = 64,
	.reset_comm        = hptiop_reset_comm_itl,
	.host_phy_flag     = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family            = MV_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree  = hptiop_internal_memfree_mv,
	.map_pci_bar       = hptiop_map_pci_bar_mv,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mv,
	.enable_intr       = hptiop_enable_intr_mv,
	.disable_intr      = hptiop_disable_intr_mv,
	.get_config        = iop_get_config_mv,
	.set_config        = iop_set_config_mv,
	.iop_intr          = iop_intr_mv,
	.post_msg          = hptiop_post_msg_mv,
	.post_req          = hptiop_post_req_mv,
	.hw_dma_bit_mask   = 33,
	.reset_comm        = hptiop_reset_comm_mv,
	.host_phy_flag     = cpu_to_le64(0),
};
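/*
 * Note the 33-bit DMA mask above: the Marvell IOP can only address the
 * first 2^33 bytes (8 GiB) of host memory, while the Intel and Frey
 * families take a full 64-bit mask.  The probe path presumably feeds
 * this value to something like
 * dma_set_mask(&pcidev->dev, DMA_BIT_MASK(ops->hw_dma_bit_mask)).
 */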

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family            = MVFREY_BASED_IOP,
	.iop_wait_ready    = iop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree  = hptiop_internal_memfree_mvfrey,
	.map_pci_bar       = hptiop_map_pci_bar_mvfrey,
	.unmap_pci_bar     = hptiop_unmap_pci_bar_mvfrey,
	.enable_intr       = hptiop_enable_intr_mvfrey,
	.disable_intr      = hptiop_disable_intr_mvfrey,
	.get_config        = iop_get_config_mvfrey,
	.set_config        = iop_set_config_mvfrey,
	.iop_intr          = iop_intr_mvfrey,
	.post_msg          = hptiop_post_msg_mvfrey,
	.post_req          = hptiop_post_req_mvfrey,
	.hw_dma_bit_mask   = 64,
	.reset_comm        = hptiop_reset_comm_mvfrey,
	.host_phy_flag     = cpu_to_le64(1),
};

static const struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);
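/*
 * MODULE_DEVICE_TABLE exports the id list above as module alias
 * information, so udev/modprobe can autoload this driver when a matching
 * HighPoint device (PCI vendor id 0x1103, "TTI") shows up on the bus.
 */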

static struct pci_driver hptiop_pci_driver = {
	.name       = driver_name,
	.id_table   = hptiop_id_table,
	.probe      = hptiop_probe,
	.remove     = hptiop_remove,
	.shutdown   = hptiop_shutdown,
};
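/*
 * .shutdown is wired up in addition to being called from hptiop_remove:
 * the PCI core also invokes it at system reboot/poweroff, which gives
 * the firmware a chance to quiesce even when the module is never
 * unloaded.
 */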

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);
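/*
 * pci_register_driver() succeeds even when no matching hardware is
 * present; hptiop_probe() only runs once the PCI core binds a device to
 * the id table, so a clean module load does not by itself mean a
 * controller was found.
 */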

MODULE_LICENSE("GPL");