Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

/*
 *  sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *  Author/maintainer:  Jeff Garzik <jgarzik@pobox.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file "COPYING" in the main directory of this archive
 *  for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <linux/uaccess.h>

#if 0
#define CARM_DEBUG
#define CARM_VERBOSE_DEBUG
#else
#undef CARM_DEBUG
#undef CARM_VERBOSE_DEBUG
#endif
#undef CARM_NDEBUG

#define DRV_NAME "sx8"
#define DRV_VERSION "1.0"
#define PFX DRV_NAME ": "

MODULE_AUTHOR("Jeff Garzik");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Promise SATA SX8 block driver");
MODULE_VERSION(DRV_VERSION);

/*
 * SX8 hardware has a single message queue for all ATA ports.
 * When this driver was written, the hardware (firmware?) would
 * corrupt data eventually, if more than one request was outstanding.
 * As one can imagine, having 8 ports bottlenecking on a single
 * command hurts performance.
 *
 * Based on user reports, later versions of the hardware (firmware?)
 * seem to be able to survive with more than one command queued.
 *
 * Therefore, we default to the safe option -- 1 command -- but
 * allow the user to increase this.
 *
 * SX8 should be able to support up to ~60 queued commands (CARM_MAX_REQ),
 * but problems seem to occur when you exceed ~30, even on newer hardware.
 */
static int max_queue = 1;
module_param(max_queue, int, 0444);
MODULE_PARM_DESC(max_queue, "Maximum number of queued commands. (min==1, max==30, safe==1)");
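/*
 * Illustrative usage (not part of the original source): loading the module
 * with "modprobe sx8 max_queue=8" raises the limit to 8 outstanding commands
 * on hardware known to tolerate it; the active value can be read back from
 * /sys/module/sx8/parameters/max_queue, read-only because of the 0444 mode.
 */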


#define NEXT_RESP(idx)	((idx + 1) % RMSG_Q_LEN)

/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
#define TAG_ENCODE(tag)	(((tag) << 16) | 0xf)
#define TAG_DECODE(tag)	(((tag) >> 16) & 0x1f)
#define TAG_VALID(tag)	((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
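/*
 * Worked example (for illustration): TAG_ENCODE(5) == 0x0005000f,
 * TAG_DECODE(0x0005000f) == 5, and TAG_VALID() accepts it because the low
 * nibble is 0xf and the decoded tag is below 32.
 */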

/* note: prints function name for you */
#ifdef CARM_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#ifdef CARM_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif	/* CARM_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif	/* CARM_DEBUG */

#ifdef CARM_NDEBUG
#define assert(expr)
#else
#define assert(expr) \
        if(unlikely(!(expr))) {                                   \
        printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
	#expr, __FILE__, __func__, __LINE__);          \
        }
#endif
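/*
 * Note (added for clarity): unlike the libc assert(), this macro only logs the
 * failed expression and lets execution continue; e.g. assert(rc == 0) prints
 * an error line but does not panic or abort.
 */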

/* defines only for the constants which don't work well as enums */
struct carm_host;

enum {
	/* adapter-wide limits */
	CARM_MAX_PORTS		= 8,
	CARM_SHM_SIZE		= (4096 << 7),
	CARM_MINORS_PER_MAJOR	= 256 / CARM_MAX_PORTS,
	CARM_MAX_WAIT_Q		= CARM_MAX_PORTS + 1,

	/* command message queue limits */
	CARM_MAX_REQ		= 64,	       /* max command msgs per host */
	CARM_MSG_LOW_WATER	= (CARM_MAX_REQ / 4),	     /* refill mark */

	/* S/G limits, host-wide and per-request */
	CARM_MAX_REQ_SG		= 32,	     /* max s/g entries per request */
	CARM_MAX_HOST_SG	= 600,		/* max s/g entries per host */
	CARM_SG_LOW_WATER	= (CARM_MAX_HOST_SG / 4),   /* re-fill mark */

	/* hardware registers */
	CARM_IHQP		= 0x1c,
	CARM_INT_STAT		= 0x10, /* interrupt status */
	CARM_INT_MASK		= 0x14, /* interrupt mask */
	CARM_HMUC		= 0x18, /* host message unit control */
	RBUF_ADDR_LO		= 0x20, /* response msg DMA buf low 32 bits */
	RBUF_ADDR_HI		= 0x24, /* response msg DMA buf high 32 bits */
	RBUF_BYTE_SZ		= 0x28,
	CARM_RESP_IDX		= 0x2c,
	CARM_CMS0		= 0x30, /* command message size reg 0 */
	CARM_LMUC		= 0x48,
	CARM_HMPHA		= 0x6c,
	CARM_INITC		= 0xb5,

	/* bits in CARM_INT_{STAT,MASK} */
	INT_RESERVED		= 0xfffffff0,
	INT_WATCHDOG		= (1 << 3),	/* watchdog timer */
	INT_Q_OVERFLOW		= (1 << 2),	/* cmd msg q overflow */
	INT_Q_AVAILABLE		= (1 << 1),	/* cmd msg q has free space */
	INT_RESPONSE		= (1 << 0),	/* response msg available */
	INT_ACK_MASK		= INT_WATCHDOG | INT_Q_OVERFLOW,
	INT_DEF_MASK		= INT_RESERVED | INT_Q_OVERFLOW |
				  INT_RESPONSE,
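	/* e.g. INT_DEF_MASK evaluates to 0xfffffff5
	 * (INT_RESERVED | INT_Q_OVERFLOW | INT_RESPONSE) */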

	/* command messages, and related register bits */
	CARM_HAVE_RESP		= 0x01,
	CARM_MSG_READ		= 1,
	CARM_MSG_WRITE		= 2,
	CARM_MSG_VERIFY		= 3,
	CARM_MSG_GET_CAPACITY	= 4,
	CARM_MSG_FLUSH		= 5,
	CARM_MSG_IOCTL		= 6,
	CARM_MSG_ARRAY		= 8,
	CARM_MSG_MISC		= 9,
	CARM_CME		= (1 << 2),
	CARM_RME		= (1 << 1),
	CARM_WZBC		= (1 << 0),
	CARM_RMI		= (1 << 0),
	CARM_Q_FULL		= (1 << 3),
	CARM_MSG_SIZE		= 288,
	CARM_Q_LEN		= 48,

	/* CARM_MSG_IOCTL messages */
	CARM_IOC_SCAN_CHAN	= 5,	/* scan channels for devices */
	CARM_IOC_GET_TCQ	= 13,	/* get tcq/ncq depth */
	CARM_IOC_SET_TCQ	= 14,	/* set tcq/ncq depth */

	IOC_SCAN_CHAN_NODEV	= 0x1f,
	IOC_SCAN_CHAN_OFFSET	= 0x40,

	/* CARM_MSG_ARRAY messages */
	CARM_ARRAY_INFO		= 0,

	ARRAY_NO_EXIST		= (1 << 31),

	/* response messages */
	RMSG_SZ			= 8,	/* sizeof(struct carm_response) */
	RMSG_Q_LEN		= 48,	/* resp. msg list length */
	RMSG_OK			= 1,	/* bit indicating msg was successful */
					/* length of entire resp. msg buffer */
	RBUF_LEN		= RMSG_SZ * RMSG_Q_LEN,

	PDC_SHM_SIZE		= (4096 << 7), /* length of entire h/w buffer */

	/* CARM_MSG_MISC messages */
	MISC_GET_FW_VER		= 2,
	MISC_ALLOC_MEM		= 3,
	MISC_SET_TIME		= 5,

	/* MISC_GET_FW_VER feature bits */
	FW_VER_4PORT		= (1 << 2), /* 1=4 ports, 0=8 ports */
	FW_VER_NON_RAID		= (1 << 1), /* 1=non-RAID firmware, 0=RAID */
	FW_VER_ZCR		= (1 << 0), /* zero channel RAID (whatever that is) */

	/* carm_host flags */
	FL_NON_RAID		= FW_VER_NON_RAID,
	FL_4PORT		= FW_VER_4PORT,
	FL_FW_VER_MASK		= (FW_VER_NON_RAID | FW_VER_4PORT),
	FL_DYN_MAJOR		= (1 << 17),
};

enum {
	CARM_SG_BOUNDARY	= 0xffffUL,	    /* s/g segment boundary */
};

enum scatter_gather_types {
	SGT_32BIT		= 0,
	SGT_64BIT		= 1,
};

enum host_states {
	HST_INVALID,		/* invalid state; never used */
	HST_ALLOC_BUF,		/* setting up master SHM area */
	HST_ERROR,		/* we never leave here */
	HST_PORT_SCAN,		/* start dev scan */
	HST_DEV_SCAN_START,	/* start per-device probe */
	HST_DEV_SCAN,		/* continue per-device probe */
	HST_DEV_ACTIVATE,	/* activate devices we found */
	HST_PROBE_FINISHED,	/* probe is complete */
	HST_PROBE_START,	/* initiate probe */
	HST_SYNC_TIME,		/* tell firmware what time it is */
	HST_GET_FW_VER,		/* get firmware version, adapter port cnt */
};
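/*
 * Note (added for clarity): state_name[] below mirrors enum host_states and
 * must stay in the same order, so that e.g. state_name[HST_SYNC_TIME] prints
 * as "HST_SYNC_TIME" in debug output.
 */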

#ifdef CARM_DEBUG
static const char *state_name[] = {
	"HST_INVALID",
	"HST_ALLOC_BUF",
	"HST_ERROR",
	"HST_PORT_SCAN",
	"HST_DEV_SCAN_START",
	"HST_DEV_SCAN",
	"HST_DEV_ACTIVATE",
	"HST_PROBE_FINISHED",
	"HST_PROBE_START",
	"HST_SYNC_TIME",
	"HST_GET_FW_VER",
};
#endif

struct carm_port {
	unsigned int			port_no;
	struct gendisk			*disk;
	struct carm_host		*host;

	/* attached device characteristics */
	u64				capacity;
	char				name[41];
	u16				dev_geom_head;
	u16				dev_geom_sect;
	u16				dev_geom_cyl;
};

struct carm_request {
	int				n_elem;
	unsigned int			msg_type;
	unsigned int			msg_subtype;
	unsigned int			msg_bucket;
	struct scatterlist		sg[CARM_MAX_REQ_SG];
};

struct carm_host {
	unsigned long			flags;
	void				__iomem *mmio;
	void				*shm;
	dma_addr_t			shm_dma;

	int				major;
	int				id;
	char				name[32];

	spinlock_t			lock;
	struct pci_dev			*pdev;
	unsigned int			state;
	u32				fw_ver;

	struct blk_mq_tag_set		tag_set;
	struct request_queue		*oob_q;
	unsigned int			n_oob;

	unsigned int			hw_sg_used;

	unsigned int			resp_idx;

	unsigned int			wait_q_prod;
	unsigned int			wait_q_cons;
	struct request_queue		*wait_q[CARM_MAX_WAIT_Q];

	void				*msg_base;
	dma_addr_t			msg_dma;

	int				cur_scan_dev;
	unsigned long			dev_active;
	unsigned long			dev_present;
	struct carm_port		port[CARM_MAX_PORTS];

	struct work_struct		fsm_task;

	struct completion		probe_comp;
};

struct carm_response {
	__le32 ret_handle;
	__le32 status;
}  __attribute__((packed));

struct carm_msg_sg {
	__le32 start;
	__le32 len;
}  __attribute__((packed));

struct carm_msg_rw {
	u8 type;
	u8 id;
	u8 sg_count;
	u8 sg_type;
	__le32 handle;
	__le32 lba;
	__le16 lba_count;
	__le16 lba_high;
	struct carm_msg_sg sg[32];
}  __attribute__((packed));
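/*
 * Size check (for illustration): the fixed header above is 16 bytes and the
 * 32 embedded S/G descriptors add 32 * 8 == 256 bytes, so even a fully
 * populated read/write message (272 bytes) fits a CARM_MSG_SIZE (288) slot.
 */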

struct carm_msg_allocbuf {
	u8 type;
	u8 subtype;
	u8 n_sg;
	u8 sg_type;
	__le32 handle;
	__le32 addr;
	__le32 len;
	__le32 evt_pool;
	__le32 n_evt;
	__le32 rbuf_pool;
	__le32 n_rbuf;
	__le32 msg_pool;
	__le32 n_msg;
	struct carm_msg_sg sg[8];
}  __attribute__((packed));

struct carm_msg_ioctl {
	u8 type;
	u8 subtype;
	u8 array_id;
	u8 reserved1;
	__le32 handle;
	__le32 data_addr;
	u32 reserved2;
}  __attribute__((packed));

struct carm_msg_sync_time {
	u8 type;
	u8 subtype;
	u16 reserved1;
	__le32 handle;
	u32 reserved2;
	__le32 timestamp;
}  __attribute__((packed));

struct carm_msg_get_fw_ver {
	u8 type;
	u8 subtype;
	u16 reserved1;
	__le32 handle;
	__le32 data_addr;
	u32 reserved2;
}  __attribute__((packed));

struct carm_fw_ver {
	__le32 version;
	u8 features;
	u8 reserved1;
	u16 reserved2;
}  __attribute__((packed));

struct carm_array_info {
	__le32 size;

	__le16 size_hi;
	__le16 stripe_size;

	__le32 mode;

	__le16 stripe_blk_sz;
	__le16 reserved1;

	__le16 cyl;
	__le16 head;

	__le16 sect;
	u8 array_id;
	u8 reserved2;

	char name[40];

	__le32 array_status;

	/* device list continues beyond this point? */
}  __attribute__((packed));

static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void carm_remove_one (struct pci_dev *pdev);
static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);

static const struct pci_device_id carm_pci_tbl[] = {
	{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, carm_pci_tbl);

static struct pci_driver carm_driver = {
	.name		= DRV_NAME,
	.id_table	= carm_pci_tbl,
	.probe		= carm_init_one,
	.remove		= carm_remove_one,
};

static const struct block_device_operations carm_bd_ops = {
	.owner		= THIS_MODULE,
	.getgeo		= carm_bdev_getgeo,
};

static unsigned int carm_host_id;
static unsigned long carm_major_alloc;



static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct carm_port *port = bdev->bd_disk->private_data;

	geo->heads = (u8) port->dev_geom_head;
	geo->sectors = (u8) port->dev_geom_sect;
	geo->cylinders = port->dev_geom_cyl;
	return 0;
}
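/*
 * Note (added for clarity): this is reached via the HDIO_GETGEO ioctl on the
 * block device; heads and sectors are truncated to 8 bits by the casts above
 * (struct hd_geometry stores them as unsigned char), while cylinders keeps
 * the 16-bit value reported by the firmware.
 */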

static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };

static inline int carm_lookup_bucket(u32 msg_size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		if (msg_size <= msg_sizes[i])
			return i;

	return -ENOENT;
}
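/*
 * Example (for illustration): carm_lookup_bucket(96) returns 2, selecting the
 * 128-byte bucket, while anything larger than CARM_MSG_SIZE (288) yields
 * -ENOENT.
 */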

static void carm_init_buckets(void __iomem *mmio)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
		writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
}

static inline void *carm_ref_msg(struct carm_host *host,
				 unsigned int msg_idx)
{
	return host->msg_base + (msg_idx * CARM_MSG_SIZE);
}

static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
					  unsigned int msg_idx)
{
	return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
}
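/*
 * Both helpers address the same per-tag message slot, CPU-side and bus-side:
 * e.g. msg_idx 3 resolves to an offset of 3 * CARM_MSG_SIZE == 864 bytes from
 * msg_base and from msg_dma respectively.
 */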

static int carm_send_msg(struct carm_host *host,
			 struct carm_request *crq, unsigned tag)
{
	void __iomem *mmio = host->mmio;
	u32 msg = (u32) carm_ref_msg_dma(host, tag);
	u32 cm_bucket = crq->msg_bucket;
	u32 tmp;
	int rc = 0;

	VPRINTK("ENTER\n");

	tmp = readl(mmio + CARM_HMUC);
	if (tmp & CARM_Q_FULL) {
#if 0
		tmp = readl(mmio + CARM_INT_MASK);
		tmp |= INT_Q_AVAILABLE;
		writel(tmp, mmio + CARM_INT_MASK);
		readl(mmio + CARM_INT_MASK);	/* flush */
#endif
		DPRINTK("host msg queue full\n");
		rc = -EBUSY;
	} else {
		writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
		readl(mmio + CARM_IHQP);	/* flush */
	}

	return rc;
}
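/*
 * Note (added for clarity): the doorbell write above packs the 32-bit message
 * DMA address with the command-size bucket index shifted into bits 2:1, and
 * the trailing readl() of CARM_IHQP flushes the posted PCI write before the
 * caller proceeds.
 */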

static int carm_array_info (struct carm_host *host, unsigned int array_idx)
{
	struct carm_msg_ioctl *ioc;
	u32 msg_data;
	dma_addr_t msg_dma;
	struct carm_request *crq;
	struct request *rq;
	int rc;

	rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq)) {
		rc = -ENOMEM;
		goto err_out;
	}
	crq = blk_mq_rq_to_pdu(rq);

	ioc = carm_ref_msg(host, rq->tag);
	msg_dma = carm_ref_msg_dma(host, rq->tag);
	msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));

	crq->msg_type = CARM_MSG_ARRAY;
	crq->msg_subtype = CARM_ARRAY_INFO;
	rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
				sizeof(struct carm_array_info));
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_ARRAY;
	ioc->subtype	= CARM_ARRAY_INFO;
	ioc->array_id	= (u8) array_idx;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(rq->tag));
	ioc->data_addr	= cpu_to_le32(msg_data);

	spin_lock_irq(&host->lock);
	assert(host->state == HST_DEV_SCAN_START ||
	       host->state == HST_DEV_SCAN);
	spin_unlock_irq(&host->lock);

	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
	blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);

	return 0;

err_out:
	spin_lock_irq(&host->lock);
	host->state = HST_ERROR;
	spin_unlock_irq(&host->lock);
	return rc;
}

typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);

static int carm_send_special (struct carm_host *host, carm_sspc_t func)
{
	struct request *rq;
	struct carm_request *crq;
	struct carm_msg_ioctl *ioc;
	void *mem;
	unsigned int msg_size;
	int rc;

	rq = blk_mq_alloc_request(host->oob_q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(rq))
		return -ENOMEM;
	crq = blk_mq_rq_to_pdu(rq);

	mem = carm_ref_msg(host, rq->tag);

	msg_size = func(host, rq->tag, mem);

	ioc = mem;
	crq->msg_type = ioc->type;
	crq->msg_subtype = ioc->subtype;
	rc = carm_lookup_bucket(msg_size);
	BUG_ON(rc < 0);
	crq->msg_bucket = (u32) rc;

	DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
	blk_execute_rq_nowait(host->oob_q, NULL, rq, true, NULL);

	return 0;
}

static unsigned int carm_fill_sync_time(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct carm_msg_sync_time *st = mem;

	time64_t tv = ktime_get_real_seconds();

	memset(st, 0, sizeof(*st));
	st->type	= CARM_MSG_MISC;
	st->subtype	= MISC_SET_TIME;
	st->handle	= cpu_to_le32(TAG_ENCODE(idx));
	st->timestamp	= cpu_to_le32(tv);
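	/* note (added): the 64-bit wall-clock seconds are truncated to a
	 * little-endian 32-bit value here, which is all the SET_TIME message
	 * carries */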

	return sizeof(struct carm_msg_sync_time);
}

static unsigned int carm_fill_alloc_buf(struct carm_host *host,
					unsigned int idx, void *mem)
{
	struct carm_msg_allocbuf *ab = mem;

	memset(ab, 0, sizeof(*ab));
	ab->type	= CARM_MSG_MISC;
	ab->subtype	= MISC_ALLOC_MEM;
	ab->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ab->n_sg	= 1;
	ab->sg_type	= SGT_32BIT;
	ab->addr	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->len		= cpu_to_le32(PDC_SHM_SIZE >> 1);
	ab->evt_pool	= cpu_to_le32(host->shm_dma + (16 * 1024));
	ab->n_evt	= cpu_to_le32(1024);
	ab->rbuf_pool	= cpu_to_le32(host->shm_dma);
	ab->n_rbuf	= cpu_to_le32(RMSG_Q_LEN);
	ab->msg_pool	= cpu_to_le32(host->shm_dma + RBUF_LEN);
	ab->n_msg	= cpu_to_le32(CARM_Q_LEN);
	ab->sg[0].start	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
	ab->sg[0].len	= cpu_to_le32(65536);

	return sizeof(struct carm_msg_allocbuf);
}

static unsigned int carm_fill_scan_channels(struct carm_host *host,
					    unsigned int idx, void *mem)
{
	struct carm_msg_ioctl *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
			      IOC_SCAN_CHAN_OFFSET);

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_IOCTL;
	ioc->subtype	= CARM_IOC_SCAN_CHAN;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	/* fill output data area with "no device" default values */
	mem += IOC_SCAN_CHAN_OFFSET;
	memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);

	return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
}

static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
					 unsigned int idx, void *mem)
{
	struct carm_msg_get_fw_ver *ioc = mem;
	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));

	memset(ioc, 0, sizeof(*ioc));
	ioc->type	= CARM_MSG_MISC;
	ioc->subtype	= MISC_GET_FW_VER;
	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
	ioc->data_addr	= cpu_to_le32(msg_data);

	return sizeof(struct carm_msg_get_fw_ver) +
	       sizeof(struct carm_fw_ver);
}

static inline void carm_push_q (struct carm_host *host, struct request_queue *q)
{
	unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;

	blk_mq_stop_hw_queues(q);
	VPRINTK("STOPPED QUEUE %p\n", q);

	host->wait_q[idx] = q;
	host->wait_q_prod++;
	BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
}

static inline struct request_queue *carm_pop_q(struct carm_host *host)
{
	unsigned int idx;

	if (host->wait_q_prod == host->wait_q_cons)
		return NULL;

	idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
	host->wait_q_cons++;

	return host->wait_q[idx];
}

static inline void carm_round_robin(struct carm_host *host)
{
	struct request_queue *q = carm_pop_q(host);
	if (q) {
		blk_mq_start_hw_queues(q);
		VPRINTK("STARTED QUEUE %p\n", q);
	}
}
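/*
 * Note (added for clarity): the three helpers above form a small FIFO of
 * stalled request queues; wait_q_prod and wait_q_cons are free-running
 * counters indexed modulo CARM_MAX_WAIT_Q (9), sized for all eight port
 * queues plus the out-of-band queue.
 */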

static inline enum dma_data_direction carm_rq_dir(struct request *rq)
{
	return op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
}

static blk_status_t carm_queue_rq(struct blk_mq_hw_ctx *hctx,
				  const struct blk_mq_queue_data *bd)
{
	struct request_queue *q = hctx->queue;
	struct request *rq = bd->rq;
	struct carm_port *port = q->queuedata;
	struct carm_host *host = port->host;
	struct carm_request *crq = blk_mq_rq_to_pdu(rq);
	struct carm_msg_rw *msg;
	struct scatterlist *sg;
	int i, n_elem = 0, rc;
	unsigned int msg_size;
	u32 tmp;

	crq->n_elem = 0;
	sg_init_table(crq->sg, CARM_MAX_REQ_SG);

	blk_mq_start_request(rq);

	spin_lock_irq(&host->lock);
	if (req_op(rq) == REQ_OP_DRV_OUT)
		goto send_msg;

	/* get scatterlist from block layer */
	sg = &crq->sg[0];
	n_elem = blk_rq_map_sg(q, rq, sg);
	if (n_elem <= 0)
		goto out_ioerr;

	/* map scatterlist to PCI bus addresses */
	n_elem = dma_map_sg(&host->pdev->dev, sg, n_elem, carm_rq_dir(rq));
	if (n_elem <= 0)
		goto out_ioerr;

	/* obey global hardware limit on S/G entries */
	if (host->hw_sg_used >= CARM_MAX_HOST_SG - n_elem)
		goto out_resource;

	crq->n_elem = n_elem;
	host->hw_sg_used += n_elem;

	/*
	 * build read/write message
	 */

	VPRINTK("build msg\n");
	msg = (struct carm_msg_rw *) carm_ref_msg(host, rq->tag);

	if (rq_data_dir(rq) == WRITE) {
		msg->type = CARM_MSG_WRITE;
		crq->msg_type = CARM_MSG_WRITE;
	} else {
		msg->type = CARM_MSG_READ;
		crq->msg_type = CARM_MSG_READ;
	}

	msg->id		= port->port_no;
	msg->sg_count	= n_elem;
	msg->sg_type	= SGT_32BIT;
	msg->handle	= cpu_to_le32(TAG_ENCODE(rq->tag));
	msg->lba	= cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
	tmp		= (blk_rq_pos(rq) >> 16) >> 16;
	msg->lba_high	= cpu_to_le16( (u16) tmp );
	msg->lba_count	= cpu_to_le16(blk_rq_sectors(rq));
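	/* note (added): the starting sector is split into a low 32-bit LBA plus
	 * a 16-bit high part; the double 16-bit shift keeps the expression well
	 * defined even when sector_t is only 32 bits wide */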
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	for (i = 0; i < n_elem; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		struct carm_msg_sg *carm_sg = &msg->sg[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		msg_size += sizeof(struct carm_msg_sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	rc = carm_lookup_bucket(msg_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	BUG_ON(rc < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	crq->msg_bucket = (u32) rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) send_msg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	 * queue read/write message to hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	VPRINTK("send msg, tag == %u\n", rq->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	rc = carm_send_msg(host, crq, rq->tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		host->hw_sg_used -= n_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		goto out_resource;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	spin_unlock_irq(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	return BLK_STS_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) out_resource:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	dma_unmap_sg(&host->pdev->dev, &crq->sg[0], n_elem, carm_rq_dir(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	carm_push_q(host, q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	spin_unlock_irq(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	return BLK_STS_DEV_RESOURCE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) out_ioerr:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	carm_round_robin(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	spin_unlock_irq(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	return BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) static void carm_handle_array_info(struct carm_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 				   struct carm_request *crq, u8 *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 				   blk_status_t error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	struct carm_port *port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	u8 *msg_data = mem + sizeof(struct carm_array_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	struct carm_array_info *desc = (struct carm_array_info *) msg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	u64 lo, hi;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	int cur_port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	size_t slen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	DPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	cur_port = host->cur_scan_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	/* should never occur */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 		printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 		       cur_port, (int) desc->array_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	port = &host->port[cur_port];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
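	/*
	 * Assemble the 48-bit sector count reported for the array:
	 * desc->size holds the low 32 bits, desc->size_hi the high 16.
	 */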
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	lo = (u64) le32_to_cpu(desc->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	hi = (u64) le16_to_cpu(desc->size_hi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	port->capacity = lo | (hi << 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	port->dev_geom_head = le16_to_cpu(desc->head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	port->dev_geom_sect = le16_to_cpu(desc->sect);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	port->dev_geom_cyl = le16_to_cpu(desc->cyl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	host->dev_active |= (1 << cur_port);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
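	/* Copy the array name and strip any trailing space padding. */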
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	strncpy(port->name, desc->name, sizeof(port->name));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	port->name[sizeof(port->name) - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	slen = strlen(port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	while (slen && (port->name[slen - 1] == ' ')) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 		port->name[slen - 1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		slen--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	       pci_name(host->pdev), port->port_no,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	       (unsigned long long) port->capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	       pci_name(host->pdev), port->port_no, port->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	assert(host->state == HST_DEV_SCAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	schedule_work(&host->fsm_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) static void carm_handle_scan_chan(struct carm_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 				  struct carm_request *crq, u8 *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 				  blk_status_t error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	unsigned int i, dev_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	int new_state = HST_DEV_SCAN_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	DPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	if (error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 		new_state = HST_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	/* TODO: scan and support non-disk devices */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	for (i = 0; i < 8; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		if (msg_data[i] == 0) { /* direct-access device (disk) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			host->dev_present |= (1 << i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			dev_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	       pci_name(host->pdev), dev_count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	assert(host->state == HST_PORT_SCAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	host->state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	schedule_work(&host->fsm_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) static void carm_handle_generic(struct carm_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				struct carm_request *crq, blk_status_t error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 				int cur_state, int next_state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	DPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	assert(host->state == cur_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		host->state = HST_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 		host->state = next_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	schedule_work(&host->fsm_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) static inline void carm_handle_resp(struct carm_host *host,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 				    __le32 ret_handle_le, u32 status)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	u32 handle = le32_to_cpu(ret_handle_le);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	unsigned int msg_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct request *rq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct carm_request *crq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	blk_status_t error = (status == RMSG_OK) ? BLK_STS_OK : BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	u8 *mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	VPRINTK("ENTER, handle == 0x%x\n", handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	if (unlikely(!TAG_VALID(handle))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		       pci_name(host->pdev), handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	msg_idx = TAG_DECODE(handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	VPRINTK("tag == %u\n", msg_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	rq = blk_mq_tag_to_rq(host->tag_set.tags[0], msg_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	crq = blk_mq_rq_to_pdu(rq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	/* fast path */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (likely(crq->msg_type == CARM_MSG_READ ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		   crq->msg_type == CARM_MSG_WRITE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		dma_unmap_sg(&host->pdev->dev, &crq->sg[0], crq->n_elem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			     carm_rq_dir(rq));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	mem = carm_ref_msg(host, msg_idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
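	/*
	 * Everything that is not a read/write completion belongs to the
	 * probe-time state machine: dispatch on message type/subtype and
	 * let the handler update host->state as needed and reschedule
	 * the fsm task.
	 */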
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	switch (crq->msg_type) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	case CARM_MSG_IOCTL: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		switch (crq->msg_subtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		case CARM_IOC_SCAN_CHAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			carm_handle_scan_chan(host, crq, mem, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 			/* unknown / invalid response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 			goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	case CARM_MSG_MISC: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		switch (crq->msg_subtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		case MISC_ALLOC_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 			carm_handle_generic(host, crq, error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 					    HST_ALLOC_BUF, HST_SYNC_TIME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		case MISC_SET_TIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 			carm_handle_generic(host, crq, error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 					    HST_SYNC_TIME, HST_GET_FW_VER);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		case MISC_GET_FW_VER: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			struct carm_fw_ver *ver = (struct carm_fw_ver *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 				(mem + sizeof(struct carm_msg_get_fw_ver));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 			if (!error) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 				host->fw_ver = le32_to_cpu(ver->version);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 				host->flags |= (ver->features & FL_FW_VER_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			carm_handle_generic(host, crq, error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 					    HST_GET_FW_VER, HST_PORT_SCAN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			/* unknown / invalid response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	case CARM_MSG_ARRAY: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		switch (crq->msg_subtype) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		case CARM_ARRAY_INFO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			carm_handle_array_info(host, crq, mem, error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			/* unknown / invalid response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 			goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		/* unknown / invalid response */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	error = BLK_STS_IOERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	host->hw_sg_used -= crq->n_elem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	blk_mq_end_request(blk_mq_rq_from_pdu(crq), error);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	if (host->hw_sg_used <= CARM_SG_LOW_WATER)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		carm_round_robin(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static inline void carm_handle_responses(struct carm_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	void __iomem *mmio = host->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	struct carm_response *resp = (struct carm_response *) host->shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	unsigned int work = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	unsigned int idx = host->resp_idx % RMSG_Q_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
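	/*
	 * Walk the response ring starting at the last consumed slot.  A
	 * status of 0xffffffff marks a slot with no new response and ends
	 * the scan; consumed slots are re-marked 0xffffffff and the
	 * stopping index is reported back via CARM_RESP_IDX.
	 */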
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		u32 status = le32_to_cpu(resp[idx].status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		if (status == 0xffffffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			VPRINTK("ending response on index %u\n", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 			writel(idx << 3, mmio + CARM_RESP_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		/* response to a message we sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		else if ((status & (1 << 31)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 			VPRINTK("handling msg response on index %u\n", idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 			carm_handle_resp(host, resp[idx].ret_handle, status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 			resp[idx].status = cpu_to_le32(0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		/* asynchronous events the hardware throws our way */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		else if ((status & 0xff000000) == (1 << 31)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			u8 *evt_type_ptr = (u8 *) &resp[idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			u8 evt_type = *evt_type_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			       pci_name(host->pdev), (int) evt_type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			resp[idx].status = cpu_to_le32(0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		idx = NEXT_RESP(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		work++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	VPRINTK("EXIT, work==%u\n", work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	host->resp_idx += work;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) static irqreturn_t carm_interrupt(int irq, void *__host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	struct carm_host *host = __host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	void __iomem *mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 	u32 mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	int handled = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		VPRINTK("no host\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	mmio = host->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	/* reading should also clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	mask = readl(mmio + CARM_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	if (mask == 0 || mask == 0xffffffff) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		VPRINTK("no work, mask == 0x%x\n", mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (mask & INT_ACK_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		writel(mask, mmio + CARM_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	if (unlikely(host->state == HST_INVALID)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		VPRINTK("not initialized yet, mask = 0x%x\n", mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	if (mask & CARM_HAVE_RESP) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		handled = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		carm_handle_responses(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	VPRINTK("EXIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	return IRQ_RETVAL(handled);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) static void carm_fsm_task (struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct carm_host *host =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		container_of(work, struct carm_host, fsm_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	unsigned int state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	int rc, i, next_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	int reschedule = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 	int new_state = HST_INVALID;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 	state = host->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	DPRINTK("ENTER, state == %s\n", state_name[state]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
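	/*
	 * Normal probe sequence, each step kicked off here and advanced by
	 * the completion handlers in carm_handle_resp():
	 *   PROBE_START -> ALLOC_BUF -> SYNC_TIME -> GET_FW_VER ->
	 *   PORT_SCAN -> DEV_SCAN_START -> DEV_SCAN (once per detected
	 *   port) -> DEV_ACTIVATE -> PROBE_FINISHED.
	 */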
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 	switch (state) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	case HST_PROBE_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		new_state = HST_ALLOC_BUF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	case HST_ALLOC_BUF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 		rc = carm_send_special(host, carm_fill_alloc_buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 			new_state = HST_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	case HST_SYNC_TIME:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		rc = carm_send_special(host, carm_fill_sync_time);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			new_state = HST_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 			reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	case HST_GET_FW_VER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		rc = carm_send_special(host, carm_fill_get_fw_ver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 			new_state = HST_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	case HST_PORT_SCAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		rc = carm_send_special(host, carm_fill_scan_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			new_state = HST_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 			reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	case HST_DEV_SCAN_START:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		host->cur_scan_dev = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		new_state = HST_DEV_SCAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	case HST_DEV_SCAN:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 		next_dev = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			if (host->dev_present & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 				next_dev = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 		if (next_dev >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			host->cur_scan_dev = next_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			rc = carm_array_info(host, next_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 				new_state = HST_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 				reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			new_state = HST_DEV_ACTIVATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 			reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	case HST_DEV_ACTIVATE: {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 		int activated = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		for (i = 0; i < CARM_MAX_PORTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 			if (host->dev_active & (1 << i)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				struct carm_port *port = &host->port[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 				struct gendisk *disk = port->disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 				set_capacity(disk, port->capacity);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 				add_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 				activated++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 		printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		       pci_name(host->pdev), activated);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		new_state = HST_PROBE_FINISHED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 		reschedule = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	case HST_PROBE_FINISHED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		complete(&host->probe_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	case HST_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		/* FIXME: TODO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		/* should never occur */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		assert(0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (new_state != HST_INVALID) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		spin_lock_irqsave(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		host->state = new_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		spin_unlock_irqrestore(&host->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	if (reschedule)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		schedule_work(&host->fsm_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
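	/*
	 * Poll CARM_LMUC until the requested bits are all set (test_bit
	 * non-zero) or all clear (test_bit zero), giving up after
	 * 50000 * 100us, i.e. roughly five seconds.
	 */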
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	for (i = 0; i < 50000; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		u32 tmp = readl(mmio + CARM_LMUC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		udelay(100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		if (test_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 			if ((tmp & bits) == bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 			if ((tmp & bits) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	       bits, test_bit ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) static void carm_init_responses(struct carm_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	void __iomem *mmio = host->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	struct carm_response *resp = (struct carm_response *) host->shm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	for (i = 0; i < RMSG_Q_LEN; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		resp[i].status = cpu_to_le32(0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	writel(0, mmio + CARM_RESP_IDX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int carm_init_host(struct carm_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	void __iomem *mmio = host->mmio;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 	u32 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	u8 tmp8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 	DPRINTK("ENTER\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	writel(0, mmio + CARM_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	tmp8 = readb(mmio + CARM_INITC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	if (tmp8 & 0x01) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		tmp8 &= ~0x01;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		writeb(tmp8, mmio + CARM_INITC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		readb(mmio + CARM_INITC);	/* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		DPRINTK("snooze...\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		msleep(5000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	tmp = readl(mmio + CARM_HMUC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (tmp & CARM_CME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		DPRINTK("CME bit present, waiting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		rc = carm_init_wait(mmio, CARM_CME, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			DPRINTK("EXIT, carm_init_wait 1 failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (tmp & CARM_RME) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		DPRINTK("RME bit present, waiting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		rc = carm_init_wait(mmio, CARM_RME, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 			DPRINTK("EXIT, carm_init_wait 2 failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 			return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	tmp &= ~(CARM_RME | CARM_CME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	writel(tmp, mmio + CARM_HMUC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	readl(mmio + CARM_HMUC);	/* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		DPRINTK("EXIT, carm_init_wait 3 failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	carm_init_buckets(mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
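	/*
	 * Tell the adapter where the shared response buffer lives (DMA
	 * address split into low/high 32-bit halves) and how long it is,
	 * then re-enable the CARM_RME/CARM_CME bits cleared above (plus
	 * CARM_WZBC).
	 */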
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	tmp = readl(mmio + CARM_HMUC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	writel(tmp, mmio + CARM_HMUC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	readl(mmio + CARM_HMUC);	/* flush */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		DPRINTK("EXIT, carm_init_wait 4 failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	writel(0, mmio + CARM_HMPHA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	writel(INT_DEF_MASK, mmio + CARM_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	carm_init_responses(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	/* start the initialization/probing state machine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	spin_lock_irq(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	assert(host->state == HST_INVALID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	host->state = HST_PROBE_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	spin_unlock_irq(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	schedule_work(&host->fsm_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	DPRINTK("EXIT\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) static const struct blk_mq_ops carm_mq_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	.queue_rq	= carm_queue_rq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) static int carm_init_disk(struct carm_host *host, unsigned int port_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	struct carm_port *port = &host->port[port_no];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 	struct gendisk *disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	port->host = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	port->port_no = port_no;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	disk = alloc_disk(CARM_MINORS_PER_MAJOR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	if (!disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	port->disk = disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	sprintf(disk->disk_name, DRV_NAME "/%u",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 		(unsigned int)host->id * CARM_MAX_PORTS + port_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	disk->major = host->major;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	disk->first_minor = port_no * CARM_MINORS_PER_MAJOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	disk->fops = &carm_bd_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	disk->private_data = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
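	/* Every port gets its own request_queue, but they all share the
	 * single hardware tag set allocated in carm_init_one(). */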
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	q = blk_mq_init_queue(&host->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (IS_ERR(q))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		return PTR_ERR(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	blk_queue_max_segments(q, CARM_MAX_REQ_SG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	q->queuedata = port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	disk->queue = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) static void carm_free_disk(struct carm_host *host, unsigned int port_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	struct carm_port *port = &host->port[port_no];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	struct gendisk *disk = port->disk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	if (!disk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 	if (disk->flags & GENHD_FL_UP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		del_gendisk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	if (disk->queue)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		blk_cleanup_queue(disk->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	put_disk(disk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) static int carm_init_shm(struct carm_host *host)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	host->shm = dma_alloc_coherent(&host->pdev->dev, CARM_SHM_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 				       &host->shm_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	if (!host->shm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	host->msg_base = host->shm + RBUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	host->msg_dma = host->shm_dma + RBUF_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
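	/*
	 * Layout of the coherent buffer: the first RBUF_LEN bytes hold the
	 * response ring (pre-filled with 0xff, i.e. "empty" status words),
	 * the rest holds the outbound message slots.
	 */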
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	memset(host->shm, 0xff, RBUF_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 	memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	struct carm_host *host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	struct request_queue *q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	printk_once(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	rc = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	rc = pci_request_regions(pdev, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 		printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 			pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		goto err_out_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	host = kzalloc(sizeof(*host), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 		printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 		       pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		goto err_out_regions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	host->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	spin_lock_init(&host->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	INIT_WORK(&host->fsm_task, carm_fsm_task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 	init_completion(&host->probe_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	host->mmio = ioremap(pci_resource_start(pdev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 			     pci_resource_len(pdev, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (!host->mmio) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		       pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		goto err_out_kfree;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	rc = carm_init_shm(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 		printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		       pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 		goto err_out_iounmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 
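	/*
	 * A single shared tag set backs both the per-port queues and the
	 * out-of-band queue (host->oob_q) set up just below; cmd_size
	 * makes blk-mq embed a struct carm_request in each request's PDU.
	 */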
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	memset(&host->tag_set, 0, sizeof(host->tag_set));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	host->tag_set.ops = &carm_mq_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	host->tag_set.cmd_size = sizeof(struct carm_request);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	host->tag_set.nr_hw_queues = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	host->tag_set.nr_maps = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	host->tag_set.queue_depth = max_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	host->tag_set.numa_node = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	host->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	rc = blk_mq_alloc_tag_set(&host->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 		goto err_out_dma_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	q = blk_mq_init_queue(&host->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	if (IS_ERR(q)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		rc = PTR_ERR(q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		blk_mq_free_tag_set(&host->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		goto err_out_dma_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 	host->oob_q = q;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	q->queuedata = host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 	 * Figure out which major to use: 160, 161, or dynamic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	if (!test_and_set_bit(0, &carm_major_alloc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		host->major = 160;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	else if (!test_and_set_bit(1, &carm_major_alloc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		host->major = 161;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		host->flags |= FL_DYN_MAJOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	host->id = carm_host_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	sprintf(host->name, DRV_NAME "%d", carm_host_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	rc = register_blkdev(host->major, host->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		goto err_out_free_majors;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	if (host->flags & FL_DYN_MAJOR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 		host->major = rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	for (i = 0; i < CARM_MAX_PORTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 		rc = carm_init_disk(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 			goto err_out_blkdev_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	rc = request_irq(pdev->irq, carm_interrupt, IRQF_SHARED, DRV_NAME, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		       pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 		goto err_out_blkdev_disks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	rc = carm_init_host(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	if (rc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		goto err_out_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	DPRINTK("waiting for probe_comp\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	wait_for_completion(&host->probe_comp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	       host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 	       (unsigned long long)pci_resource_start(pdev, 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	       pdev->irq, host->major);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	carm_host_id++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	pci_set_drvdata(pdev, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) err_out_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	free_irq(pdev->irq, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) err_out_blkdev_disks:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	for (i = 0; i < CARM_MAX_PORTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 		carm_free_disk(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	unregister_blkdev(host->major, host->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) err_out_free_majors:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	if (host->major == 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 		clear_bit(0, &carm_major_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	else if (host->major == 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 		clear_bit(1, &carm_major_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	blk_cleanup_queue(host->oob_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	blk_mq_free_tag_set(&host->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) err_out_dma_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) err_out_iounmap:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	iounmap(host->mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) err_out_kfree:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	kfree(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) err_out_regions:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) static void carm_remove_one (struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	struct carm_host *host = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (!host) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		       pci_name(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 
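	/* Tear down in the reverse order of carm_init_one(): IRQ, per-port
	 * disks, the block-device major, the OOB queue and tag set, the
	 * DMA buffer, the MMIO mapping, and finally the PCI resources. */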
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	free_irq(pdev->irq, host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	for (i = 0; i < CARM_MAX_PORTS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		carm_free_disk(host, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	unregister_blkdev(host->major, host->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (host->major == 160)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		clear_bit(0, &carm_major_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	else if (host->major == 161)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		clear_bit(1, &carm_major_alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	blk_cleanup_queue(host->oob_q);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 	blk_mq_free_tag_set(&host->tag_set);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	dma_free_coherent(&pdev->dev, CARM_SHM_SIZE, host->shm, host->shm_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	iounmap(host->mmio);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	kfree(host);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) module_pci_driver(carm_driver);