Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright 2011 Integrated Device Technology, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Alexandre Bounine <alexandre.bounine@idt.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  * Chul Kim <chul.kim@idt.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/ioport.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/pci.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/rio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/rio_drv.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/kfifo.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "tsi721.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) u32 tsi_dbg_level;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) module_param_named(dbg_level, tsi_dbg_level, uint, S_IWUSR | S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) static int pcie_mrrs = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) module_param(pcie_mrrs, int, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) MODULE_PARM_DESC(pcie_mrrs, "PCIe MRRS override value (0...5)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) static u8 mbox_sel = 0x0f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) module_param(mbox_sel, byte, S_IRUGO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) MODULE_PARM_DESC(mbox_sel,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) 		 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) static DEFINE_SPINLOCK(tsi721_maint_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47)  * tsi721_lcread - read from local SREP config space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48)  * @mport: RapidIO master port info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49)  * @index: ID of RapdiIO interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50)  * @offset: Offset into configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51)  * @len: Length (in bytes) of the maintenance transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52)  * @data: Value to be read into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54)  * Generates a local SREP space read. Returns %0 on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55)  * success or %-EINVAL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 			 int len, u32 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	if (len != sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) 		return -EINVAL; /* only 32-bit access is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) 	*data = ioread32(priv->regs + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71)  * tsi721_lcwrite - write into local SREP config space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72)  * @mport: RapidIO master port info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73)  * @index: ID of RapdiIO interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74)  * @offset: Offset into configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75)  * @len: Length (in bytes) of the maintenance transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76)  * @data: Value to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78)  * Generates a local write into SREP configuration space. Returns %0 on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79)  * success or %-EINVAL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) 			  int len, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) 	if (len != sizeof(u32))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) 		return -EINVAL; /* only 32-bit access is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) 	iowrite32(data, priv->regs + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) 
/**
 * tsi721_maint_dma - Helper function to generate RapidIO maintenance
 *                    transactions using designated Tsi721 DMA channel.
 * @priv: pointer to tsi721 private data
 * @sys_size: RapidIO transport system size
 * @destid: Destination ID of transaction
 * @hopcount: Number of hops to target device
 * @offset: Offset into configuration space
 * @len: Length (in bytes) of the maintenance transaction
 * @data: Location to be read from or write into
 * @do_wr: Operation flag (1 == MAINT_WR)
 *
 * Generates a RapidIO maintenance transaction (Read or Write).
 * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
 */
static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
			u16 destid, u8 hopcount, u32 offset, int len,
			u32 *data, int do_wr)
{
	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
	struct tsi721_dma_desc *bd_ptr;
	u32 rd_count, swr_ptr, ch_stat;
	unsigned long flags;
	int i, err = 0;
	u32 op = do_wr ? MAINT_WR : MAINT_RD;

	/* Only aligned 32-bit accesses inside the maintenance space allowed */
	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
		return -EINVAL;

	/*
	 * The maintenance DMA channel (descriptor + registers) is a single
	 * shared resource; serialize all maintenance transactions on it.
	 */
	spin_lock_irqsave(&tsi721_maint_lock, flags);

	bd_ptr = priv->mdma.bd_base;

	/* Current descriptor read count - new write count is derived from it */
	rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);

	/* Initialize DMA descriptor */
	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
	bd_ptr[0].raddr_hi = 0;
	if (do_wr)
		/* Payload travels big-endian on the RapidIO link */
		bd_ptr[0].data[0] = cpu_to_be32p(data);
	else
		bd_ptr[0].data[0] = 0xffffffff;

	/* Descriptor must be visible in memory before the engine is kicked */
	mb();

	/* Start DMA operation */
	iowrite32(rd_count + 2,	regs + TSI721_DMAC_DWRCNT);
	/* Read back to flush the posted write and start the engine now */
	ioread32(regs + TSI721_DMAC_DWRCNT);
	i = 0;

	/* Wait until DMA transfer is finished */
	while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
							& TSI721_DMAC_STS_RUN) {
		udelay(1);
		/* ~5 s polling budget before declaring the channel stuck */
		if (++i >= 5000000) {
			/* NOTE(review): message says "read timeout" even for
			 * MAINT_WR operations - text predates write support.
			 */
			tsi_debug(MAINT, &priv->pdev->dev,
				"DMA[%d] read timeout ch_status=%x",
				priv->mdma.ch_id, ch_stat);
			if (!do_wr)
				*data = 0xffffffff;
			err = -EIO;
			goto err_out;
		}
	}

	if (ch_stat & TSI721_DMAC_STS_ABORT) {
		/* If DMA operation aborted due to error,
		 * reinitialize DMA channel
		 */
		tsi_debug(MAINT, &priv->pdev->dev, "DMA ABORT ch_stat=%x",
			  ch_stat);
		tsi_debug(MAINT, &priv->pdev->dev,
			  "OP=%d : destid=%x hc=%x off=%x",
			  do_wr ? MAINT_WR : MAINT_RD,
			  destid, hopcount, offset);
		/* Ack all channel interrupts, then re-init and reset the
		 * descriptor write pointer so the channel is usable again.
		 */
		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
		udelay(10);
		iowrite32(0, regs + TSI721_DMAC_DWRCNT);
		udelay(1);
		if (!do_wr)
			*data = 0xffffffff;
		err = -EIO;
		goto err_out;
	}

	if (!do_wr)
		/* Read result was DMAed into the descriptor, big-endian */
		*data = be32_to_cpu(bd_ptr[0].data[0]);

	/*
	 * Update descriptor status FIFO RD pointer.
	 * NOTE: Skipping check and clear FIFO entries because we are waiting
	 * for transfer to be completed.
	 */
	swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
	iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);

err_out:
	spin_unlock_irqrestore(&tsi721_maint_lock, flags);

	return err;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200)  * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201)  *                    using Tsi721 BDMA engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202)  * @mport: RapidIO master port control structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203)  * @index: ID of RapdiIO interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204)  * @destid: Destination ID of transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205)  * @hopcount: Number of hops to target device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206)  * @offset: Offset into configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207)  * @len: Length (in bytes) of the maintenance transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208)  * @val: Location to be read into
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210)  * Generates a RapidIO maintenance read transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211)  * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) 			u8 hopcount, u32 offset, int len, u32 *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 				offset, len, data, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223)  * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224)  *                     using Tsi721 BDMA engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225)  * @mport: RapidIO master port control structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226)  * @index: ID of RapdiIO interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227)  * @destid: Destination ID of transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228)  * @hopcount: Number of hops to target device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229)  * @offset: Offset into configuration space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230)  * @len: Length (in bytes) of the maintenance transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231)  * @val: Value to be written
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233)  * Generates a RapidIO maintenance write transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234)  * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 			 u8 hopcount, u32 offset, int len, u32 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	u32 temp = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) 	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 				offset, len, &temp, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247)  * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248)  * @priv:  tsi721 device private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250)  * Handles inbound port-write interrupts. Copies PW message from an internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251)  * buffer into PW message FIFO and schedules deferred routine to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252)  * queued messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255) tsi721_pw_handler(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257) 	u32 pw_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258) 	u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261) 	pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263) 	if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264) 		pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265) 		pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266) 		pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267) 		pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) 		/* Queue PW message (if there is room in FIFO),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 		 * otherwise discard it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 		spin_lock(&priv->pw_fifo_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 		if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 			kfifo_in(&priv->pw_fifo, pw_buf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 						TSI721_RIO_PW_MSG_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 			priv->pw_discard_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 		spin_unlock(&priv->pw_fifo_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	/* Clear pending PW interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 		  priv->regs + TSI721_RIO_PW_RX_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	schedule_work(&priv->pw_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290) static void tsi721_pw_dpc(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292) 	struct tsi721_device *priv = container_of(work, struct tsi721_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293) 						    pw_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294) 	union rio_pw_msg pwmsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297) 	 * Process port-write messages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299) 	while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)&pwmsg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300) 			 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) 		/* Pass the port-write message to RIO core for processing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 		rio_inb_pwrite_handler(&priv->mport, &pwmsg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307)  * tsi721_pw_enable - enable/disable port-write interface init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308)  * @mport: Master port implementing the port write unit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309)  * @enable:    1=enable; 0=disable port-write message handling
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) static int tsi721_pw_enable(struct rio_mport *mport, int enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) 	u32 rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	if (enable)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 		rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 		rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	/* Clear pending PW interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 		  priv->regs + TSI721_RIO_PW_RX_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) 	/* Update enable bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 	iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333)  * tsi721_dsend - Send a RapidIO doorbell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334)  * @mport: RapidIO master port info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335)  * @index: ID of RapidIO interface
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336)  * @destid: Destination ID of target device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337)  * @data: 16-bit info field of RapidIO doorbell
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339)  * Sends a RapidIO doorbell message. Always returns %0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) static int tsi721_dsend(struct rio_mport *mport, int index,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 			u16 destid, u16 data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	u32 offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 		 (destid << 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 	tsi_debug(DBELL, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) 		  "Send Doorbell 0x%04x to destID 0x%x", data, destid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) 	iowrite16be(data, priv->odb_base + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358)  * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359)  * @priv: tsi721 device-specific data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361)  * Handles inbound doorbell interrupts. Copies doorbell entry from an internal
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362)  * buffer into DB message FIFO and schedules deferred  routine to process
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363)  * queued DBs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) tsi721_dbell_handler(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 	u32 regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) 	/* Disable IDB interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) 	regval &= ~TSI721_SR_CHINT_IDBQRCV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 	iowrite32(regval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 	schedule_work(&priv->idb_work);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 
/* Deferred (workqueue) processing of queued inbound doorbells.
 * Walks the inbound doorbell ring between the hardware write pointer and
 * our read pointer, dispatches each entry to its registered handler, then
 * re-enables the IDB interrupt that tsi721_dbell_handler() masked.
 */
static void tsi721_db_dpc(struct work_struct *work)
{
	struct tsi721_device *priv = container_of(work, struct tsi721_device,
						    idb_work);
	struct rio_mport *mport;
	struct rio_dbell *dbell;
	int found = 0;
	u32 wr_ptr, rd_ptr;
	u64 *idb_entry;
	u32 regval;
	/* View of one 8-byte IDB entry as bytes for the DBELL_* accessors */
	union {
		u64 msg;
		u8  bytes[8];
	} idb;

	/*
	 * Process queued inbound doorbells
	 */
	mport = &priv->mport;

	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;

	while (wr_ptr != rd_ptr) {
		idb_entry = (u64 *)(priv->idb_base +
					(TSI721_IDB_ENTRY_SIZE * rd_ptr));
		rd_ptr++;
		rd_ptr %= IDB_QSIZE;
		idb.msg = *idb_entry;
		/* Clear the consumed slot so stale data is never re-read */
		*idb_entry = 0;

		/* Process one doorbell */
		list_for_each_entry(dbell, &mport->dbells, node) {
			if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
			    (dbell->res->end >= DBELL_INF(idb.bytes))) {
				found = 1;
				break;
			}
		}

		if (found) {
			dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
				    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
		} else {
			/* No handler registered for this info range */
			tsi_debug(DBELL, &priv->pdev->dev,
				  "spurious IDB sid %2.2x tid %2.2x info %4.4x",
				  DBELL_SID(idb.bytes), DBELL_TID(idb.bytes),
				  DBELL_INF(idb.bytes));
		}

		/* Re-read WP: hardware may have queued more entries while
		 * we were dispatching this one.
		 */
		wr_ptr = ioread32(priv->regs +
				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	}

	/* Publish our new read pointer back to the hardware */
	iowrite32(rd_ptr & (IDB_QSIZE - 1),
		priv->regs + TSI721_IDQ_RP(IDB_QUEUE));

	/* Re-enable IDB interrupts */
	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
	regval |= TSI721_SR_CHINT_IDBQRCV;
	iowrite32(regval,
		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));

	/* Close the race with entries that arrived after the loop exited
	 * but before the interrupt was re-enabled: reschedule if any.
	 */
	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
	if (wr_ptr != rd_ptr)
		schedule_work(&priv->idb_work);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450)  * tsi721_irqhandler - Tsi721 interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451)  * @irq: Linux interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452)  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454)  * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455)  * interrupt events and calls an event-specific handler(s).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	struct tsi721_device *priv = (struct tsi721_device *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	u32 dev_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	u32 dev_ch_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	u32 intval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	u32 ch_inte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	/* For MSI mode disable all device-level interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	if (priv->flags & TSI721_USING_MSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 		iowrite32(0, priv->regs + TSI721_DEV_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 	dev_int = ioread32(priv->regs + TSI721_DEV_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	if (!dev_int)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 		/* Service SR2PC Channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 		if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 			/* Service Inbound Doorbell interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 			intval = ioread32(priv->regs +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 						TSI721_SR_CHINT(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 			if (intval & TSI721_SR_CHINT_IDBQRCV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 				tsi721_dbell_handler(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 				tsi_info(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 					"Unsupported SR_CH_INT %x", intval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 			/* Clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 			iowrite32(intval,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 				priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) 			ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	if (dev_int & TSI721_DEV_INT_SMSG_CH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 		int ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 		 * Service channel interrupts from Messaging Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 		if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 			/* Disable signaled OB MSG Channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 			ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 			 * Process Inbound Message interrupt for each MBOX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 			for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 				if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 				tsi721_imsg_handler(priv, ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 		if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) 			/* Disable signaled OB MSG Channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 			ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 			 * Process Outbound Message interrupts for each MBOX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 			for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) 				if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530) 				tsi721_omsg_handler(priv, ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	if (dev_int & TSI721_DEV_INT_SRIO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 		/* Service SRIO MAC interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 		intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 		if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 			tsi721_pw_handler(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) #ifdef CONFIG_RAPIDIO_DMA_ENGINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	if (dev_int & TSI721_DEV_INT_BDMA_CH) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 		int ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 		if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 			tsi_debug(DMA, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 				  "IRQ from DMA channel 0x%08x", dev_ch_int);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 			for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 				if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 				tsi721_bdma_handler(&priv->bdma[ch]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 	/* For MSI mode re-enable device-level interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	if (priv->flags & TSI721_USING_MSI) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 		dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 		iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) static void tsi721_interrupts_init(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 	u32 intr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	/* Enable IDB interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	iowrite32(TSI721_SR_CHINT_ALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 		priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	iowrite32(TSI721_SR_CHINT_IDBQRCV,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	/* Enable SRIO MAC interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		priv->regs + TSI721_RIO_EM_DEV_INT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 	/* Enable interrupts from channels in use */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) #ifdef CONFIG_RAPIDIO_DMA_ENGINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		(TSI721_INT_BDMA_CHAN_M &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 		 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 	iowrite32(intr,	priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) 	if (priv->flags & TSI721_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 		intr = TSI721_DEV_INT_SRIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	iowrite32(intr, priv->regs + TSI721_DEV_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	ioread32(priv->regs + TSI721_DEV_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605)  * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606)  * @irq: Linux interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607)  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609)  * Handles outbound messaging interrupts signaled using MSI-X.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	struct tsi721_device *priv = (struct tsi721_device *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	int mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 	tsi721_omsg_handler(priv, mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622)  * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623)  * @irq: Linux interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624)  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626)  * Handles inbound messaging interrupts signaled using MSI-X.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	struct tsi721_device *priv = (struct tsi721_device *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 	int mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 	mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 	tsi721_imsg_handler(priv, mbox + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639)  * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640)  * @irq: Linux interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641)  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643)  * Handles Tsi721 interrupts from SRIO MAC.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	struct tsi721_device *priv = (struct tsi721_device *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	u32 srio_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 	/* Service SRIO MAC interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 	if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 		tsi721_pw_handler(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659)  * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660)  * @irq: Linux interrupt number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661)  * @ptr: Pointer to interrupt-specific data (tsi721_device structure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663)  * Handles Tsi721 interrupts from SR2PC Channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664)  * NOTE: At this moment services only one SR2PC channel associated with inbound
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665)  * doorbells.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 	struct tsi721_device *priv = (struct tsi721_device *)ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	u32 sr_ch_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	/* Service Inbound DB interrupt from SR2PC channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 		tsi721_dbell_handler(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) 	/* Clear interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 	iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) 	/* Read back to ensure that interrupt was cleared */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680) 	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686)  * tsi721_request_msix - register interrupt service for MSI-X mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687)  * @priv: tsi721 device-specific data structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689)  * Registers MSI-X interrupt service routines for interrupts that are active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690)  * immediately after mport initialization. Messaging interrupt service routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691)  * should be registered during corresponding open requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) static int tsi721_request_msix(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 			tsi721_sr2pc_ch_msix, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			priv->msix[TSI721_VECT_IDB].irq_name, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			tsi721_srio_msix, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715)  * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718)  * Configures MSI-X support for Tsi721. Supports only an exact number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719)  * of requested vectors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) static int tsi721_enable_msix(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	struct msix_entry entries[TSI721_VECT_MAX];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 	 * Initialize MSI-X entries for Messaging Engine:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	 * this driver supports four RIO mailboxes (inbound and outbound)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	 * NOTE: Inbound message MBOX 0...4 use IB channels 4...7. Therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	 * offset +4 is added to IB MBOX number.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	for (i = 0; i < RIO_MAX_MBOX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 		entries[TSI721_VECT_IMB0_RCV + i].entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		entries[TSI721_VECT_IMB0_INT + i].entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 					TSI721_MSIX_IMSG_INT(i + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		entries[TSI721_VECT_OMB0_DONE + i].entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 					TSI721_MSIX_OMSG_DONE(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		entries[TSI721_VECT_OMB0_INT + i].entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 					TSI721_MSIX_OMSG_INT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) #ifdef CONFIG_RAPIDIO_DMA_ENGINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	 * Initialize MSI-X entries for Block DMA Engine:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	 * this driver supports XXX DMA channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	 * (one is reserved for SRIO maintenance transactions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		entries[TSI721_VECT_DMA0_DONE + i].entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 					TSI721_MSIX_DMACH_DONE(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 		entries[TSI721_VECT_DMA0_INT + i].entry =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 					TSI721_MSIX_DMACH_INT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		tsi_err(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 			"Failed to enable MSI-X (err=%d)", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	 * Copy MSI-X vector information into tsi721 private structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	for (i = 0; i < RIO_MAX_MBOX; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 				entries[TSI721_VECT_IMB0_RCV + i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			 i, pci_name(priv->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 				entries[TSI721_VECT_IMB0_INT + i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 			 i, pci_name(priv->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 				entries[TSI721_VECT_OMB0_DONE + i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 			 i, pci_name(priv->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 				entries[TSI721_VECT_OMB0_INT + i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 			 i, pci_name(priv->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) #ifdef CONFIG_RAPIDIO_DMA_ENGINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 				entries[TSI721_VECT_DMA0_DONE + i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			 i, pci_name(priv->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 		priv->msix[TSI721_VECT_DMA0_INT + i].vector =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 				entries[TSI721_VECT_DMA0_INT + i].vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 			 i, pci_name(priv->pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) #endif /* CONFIG_PCI_MSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) static int tsi721_request_irq(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	if (priv->flags & TSI721_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		err = tsi721_request_msix(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		err = request_irq(priv->pdev->irq, tsi721_irqhandler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			  DRV_NAME, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		tsi_err(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			"Unable to allocate interrupt, err=%d", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) static void tsi721_free_irq(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	if (priv->flags & TSI721_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 		free_irq(priv->msix[TSI721_VECT_IDB].vector, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		free_irq(priv->msix[TSI721_VECT_PWRX].vector, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	free_irq(priv->pdev->irq, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) tsi721_obw_alloc(struct tsi721_device *priv, struct tsi721_obw_bar *pbar,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		 u32 size, int *win_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	u64 win_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	u64 bar_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	u64 bar_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	u32 align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	struct tsi721_ob_win *win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	struct tsi721_ob_win *new_win = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	int new_win_idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	bar_base = pbar->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	bar_end =  bar_base + pbar->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	win_base = bar_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	align = size/TSI721_PC2SR_ZONES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	while (i < TSI721_IBWIN_NUM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		for (i = 0; i < TSI721_IBWIN_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			if (!priv->ob_win[i].active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 				if (new_win == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 					new_win = &priv->ob_win[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 					new_win_idx = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			 * If this window belongs to the current BAR check it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			 * for overlap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			win = &priv->ob_win[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			if (win->base >= bar_base && win->base < bar_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 				if (win_base < (win->base + win->size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 						(win_base + size) > win->base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 					/* Overlap detected */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 					win_base = win->base + win->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 					win_base = ALIGN(win_base, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	if (win_base + size > bar_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	if (!new_win) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		tsi_err(&priv->pdev->dev, "OBW count tracking failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	new_win->active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	new_win->base = win_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	new_win->size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	new_win->pbar = pbar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	priv->obwin_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	pbar->free -= size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	*win_id = new_win_idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) static int tsi721_map_outb_win(struct rio_mport *mport, u16 destid, u64 rstart,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			u32 size, u32 flags, dma_addr_t *laddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	struct tsi721_obw_bar *pbar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	struct tsi721_ob_win *ob_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	int obw = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	u32 rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	u64 rio_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	u32 zsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	int ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	tsi_debug(OBW, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		  "did=%d ra=0x%llx sz=0x%x", destid, rstart, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (!is_power_of_2(size) || (size < 0x8000) || (rstart & (size - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	if (priv->obwin_cnt == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	for (i = 0; i < 2; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		if (priv->p2r_bar[i].free >= size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			pbar = &priv->p2r_bar[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 			ret = tsi721_obw_alloc(priv, pbar, size, &obw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 			if (!ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	WARN_ON(obw == -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	ob_win = &priv->ob_win[obw];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	ob_win->destid = destid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	ob_win->rstart = rstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	tsi_debug(OBW, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		  "allocated OBW%d @%llx", obw, ob_win->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	 * Configure Outbound Window
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	zsize = size/TSI721_PC2SR_ZONES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	rio_addr = rstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	 * Program Address Translation Zones:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	 *  This implementation uses all 8 zones associated wit window.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	for (i = 0; i < TSI721_PC2SR_ZONES; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 		while (ioread32(priv->regs + TSI721_ZONE_SEL) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 			TSI721_ZONE_SEL_GO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 			udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		rval = (u32)(rio_addr & TSI721_LUT_DATA0_ADD) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 			TSI721_LUT_DATA0_NREAD | TSI721_LUT_DATA0_NWR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		iowrite32(rval, priv->regs + TSI721_LUT_DATA0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		rval = (u32)(rio_addr >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		iowrite32(rval, priv->regs + TSI721_LUT_DATA1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		rval = destid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		iowrite32(rval, priv->regs + TSI721_LUT_DATA2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		rval = TSI721_ZONE_SEL_GO | (obw << 3) | i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		rio_addr += zsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	iowrite32(TSI721_OBWIN_SIZE(size) << 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		  priv->regs + TSI721_OBWINSZ(obw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	iowrite32((u32)(ob_win->base >> 32), priv->regs + TSI721_OBWINUB(obw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	iowrite32((u32)(ob_win->base & TSI721_OBWINLB_BA) | TSI721_OBWINLB_WEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		  priv->regs + TSI721_OBWINLB(obw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	*laddr = ob_win->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static void tsi721_unmap_outb_win(struct rio_mport *mport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 				  u16 destid, u64 rstart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	struct tsi721_ob_win *ob_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	tsi_debug(OBW, &priv->pdev->dev, "did=%d ra=0x%llx", destid, rstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	for (i = 0; i < TSI721_OBWIN_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		ob_win = &priv->ob_win[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 		if (ob_win->active &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		    ob_win->destid == destid && ob_win->rstart == rstart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			tsi_debug(OBW, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 				  "free OBW%d @%llx", i, ob_win->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			ob_win->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			iowrite32(0, priv->regs + TSI721_OBWINLB(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 			ob_win->pbar->free += ob_win->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			priv->obwin_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  * translation regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  * Disables SREP translation regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	int i, z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	u32 rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/* Disable all PC2SR translation windows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	for (i = 0; i < TSI721_OBWIN_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		iowrite32(0, priv->regs + TSI721_OBWINLB(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	/* Initialize zone lookup tables to avoid ECC errors on reads */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	iowrite32(0, priv->regs + TSI721_LUT_DATA0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	iowrite32(0, priv->regs + TSI721_LUT_DATA1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	iowrite32(0, priv->regs + TSI721_LUT_DATA2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	for (i = 0; i < TSI721_OBWIN_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		for (z = 0; z < TSI721_PC2SR_ZONES; z++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 			while (ioread32(priv->regs + TSI721_ZONE_SEL) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 				TSI721_ZONE_SEL_GO) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 				udelay(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			rval = TSI721_ZONE_SEL_GO | (i << 3) | z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			iowrite32(rval, priv->regs + TSI721_ZONE_SEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	if (priv->p2r_bar[0].size == 0 && priv->p2r_bar[1].size == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		priv->obwin_cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	priv->p2r_bar[0].free = priv->p2r_bar[0].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	priv->p2r_bar[1].free = priv->p2r_bar[1].size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	for (i = 0; i < TSI721_OBWIN_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		priv->ob_win[i].active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	priv->obwin_cnt = TSI721_OBWIN_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)  * tsi721_rio_map_inb_mem -- Mapping inbound memory region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)  * @mport: RapidIO master port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)  * @lstart: Local memory space start address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)  * @rstart: RapidIO space start address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078)  * @size: The mapping region size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)  * @flags: Flags for mapping. 0 for using default flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  * Return: 0 -- Success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083)  * This function will create the inbound mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)  * from rstart to lstart.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		u64 rstart, u64 size, u32 flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	int i, avail = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	u32 regval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	struct tsi721_ib_win *ib_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 	bool direct = (lstart == rstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	u64 ibw_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	dma_addr_t loc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	u64 ibw_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	struct tsi721_ib_win_mapping *map = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	int ret = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	/* Max IBW size supported by HW is 16GB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	if (size > 0x400000000UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	if (direct) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		/* Calculate minimal acceptable window size and base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		ibw_size = roundup_pow_of_two(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		ibw_start = lstart & ~(ibw_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		tsi_debug(IBW, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			"Direct (RIO_0x%llx -> PCIe_%pad), size=0x%llx, ibw_start = 0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			rstart, &lstart, size, ibw_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		while ((lstart + size) > (ibw_start + ibw_size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			ibw_size *= 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			ibw_start = lstart & ~(ibw_size - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			/* Check for crossing IBW max size 16GB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			if (ibw_size > 0x400000000UL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 				return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 		loc_start = ibw_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		if (map == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 		tsi_debug(IBW, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 			"Translated (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 			rstart, &lstart, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 		if (!is_power_of_2(size) || size < 0x1000 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		    ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		if (priv->ibwin_cnt == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 			return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		ibw_start = rstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		ibw_size = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		loc_start = lstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	 * Scan for overlapping with active regions and mark the first available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	 * IB window at the same time.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		ib_win = &priv->ib_win[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		if (!ib_win->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			if (avail == -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 				avail = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 				ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		} else if (ibw_start < (ib_win->rstart + ib_win->size) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			   (ibw_start + ibw_size) > ib_win->rstart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			/* Return error if address translation involved */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			if (!direct || ib_win->xlat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 				ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			 * Direct mappings usually are larger than originally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			 * requested fragments - check if this new request fits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			 * into it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			if (rstart >= ib_win->rstart &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			    (rstart + size) <= (ib_win->rstart +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 							ib_win->size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 				/* We are in - no further mapping required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				map->lstart = lstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 				list_add_tail(&map->node, &ib_win->mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 				return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 			ret = -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	i = avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	/* Sanity check: available IB window must be disabled at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	if (WARN_ON(regval & TSI721_IBWIN_LB_WEN)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		ret = -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	ib_win = &priv->ib_win[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	ib_win->active = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	ib_win->rstart = ibw_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	ib_win->lstart = loc_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	ib_win->size = ibw_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	ib_win->xlat = (lstart != rstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	INIT_LIST_HEAD(&ib_win->mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	 * When using direct IBW mapping and have larger than requested IBW size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	 * we can have multiple local memory blocks mapped through the same IBW
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	 * To handle this situation we maintain list of "clients" for such IBWs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	if (direct) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		map->lstart = lstart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		list_add_tail(&map->node, &ib_win->mappings);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	iowrite32(TSI721_IBWIN_SIZE(ibw_size) << 8,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 			priv->regs + TSI721_IBWIN_SZ(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	iowrite32(((u64)loc_start >> 32), priv->regs + TSI721_IBWIN_TUA(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	iowrite32(((u64)loc_start & TSI721_IBWIN_TLA_ADD),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 		  priv->regs + TSI721_IBWIN_TLA(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	iowrite32(ibw_start >> 32, priv->regs + TSI721_IBWIN_UB(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	iowrite32((ibw_start & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		priv->regs + TSI721_IBWIN_LB(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	priv->ibwin_cnt--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	tsi_debug(IBW, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 		"Configured IBWIN%d (RIO_0x%llx -> PCIe_%pad), size=0x%llx",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		i, ibw_start, &loc_start, ibw_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	kfree(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  * tsi721_rio_unmap_inb_mem -- Unmapping inbound memory region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)  * @mport: RapidIO master port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237)  * @lstart: Local memory space start address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 				dma_addr_t lstart)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	struct tsi721_ib_win *ib_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	tsi_debug(IBW, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		"Unmap IBW mapped to PCIe_%pad", &lstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	/* Search for matching active inbound translation window */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		ib_win = &priv->ib_win[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 		/* Address translating IBWs must to be an exact march */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		if (!ib_win->active ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 		    (ib_win->xlat && lstart != ib_win->lstart))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		if (lstart >= ib_win->lstart &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		    lstart < (ib_win->lstart + ib_win->size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			if (!ib_win->xlat) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 				struct tsi721_ib_win_mapping *map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 				int found = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 				list_for_each_entry(map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 						    &ib_win->mappings, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 					if (map->lstart == lstart) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 						list_del(&map->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 						kfree(map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 						found = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 						break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 					}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 				}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 				if (!found)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 					continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 				if (!list_empty(&ib_win->mappings))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 					break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			tsi_debug(IBW, &priv->pdev->dev, "Disable IBWIN_%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 			iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 			ib_win->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 			priv->ibwin_cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	if (i == TSI721_IBWIN_NUM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		tsi_debug(IBW, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 			"IB window mapped to %pad not found", &lstart);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)  * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)  * translation regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300)  * Disables inbound windows.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	/* Disable all SR2PC inbound windows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	for (i = 0; i < TSI721_IBWIN_NUM; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	priv->ibwin_cnt = TSI721_IBWIN_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313)  * tsi721_close_sr2pc_mapping - closes all active inbound (SRIO->PCIe)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)  * translation regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315)  * @priv: pointer to tsi721 device private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) static void tsi721_close_sr2pc_mapping(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	struct tsi721_ib_win *ib_win;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	/* Disable all active SR2PC inbound windows */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		ib_win = &priv->ib_win[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 		if (ib_win->active) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 			iowrite32(0, priv->regs + TSI721_IBWIN_LB(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 			ib_win->active = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * tsi721_port_write_init - Inbound port write interface init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * Initializes inbound port write handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * Returns %0 on success or %-ENOMEM on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) static int tsi721_port_write_init(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	priv->pw_discard_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	spin_lock_init(&priv->pw_fifo_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	if (kfifo_alloc(&priv->pw_fifo,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		tsi_err(&priv->pdev->dev, "PW FIFO allocation failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	/* Use reliable port-write capture mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) static void tsi721_port_write_free(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	kfifo_free(&priv->pw_fifo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) static int tsi721_doorbell_init(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	/* Outbound Doorbells do not require any setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	 * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	 * That BAR1 was mapped during the probe routine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	/* Initialize Inbound Doorbell processing DPC and queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	priv->db_discard_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	INIT_WORK(&priv->idb_work, tsi721_db_dpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	/* Allocate buffer for inbound doorbells queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 					    IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 					    &priv->idb_dma, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	if (!priv->idb_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	tsi_debug(DBELL, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		  "Allocated IDB buffer @ %p (phys = %pad)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		  priv->idb_base, &priv->idb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	iowrite32(((u64)priv->idb_dma >> 32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	/* Enable accepting all inbound doorbells */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) static void tsi721_doorbell_free(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	if (priv->idb_base == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	/* Free buffer allocated for inbound doorbell queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 			  priv->idb_base, priv->idb_dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	priv->idb_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)  * Initialize BDMA channel allocated for RapidIO maintenance read/write
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414)  * request generation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415)  * Returns %0 on success or %-ENOMEM on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) static int tsi721_bdma_maint_init(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 	struct tsi721_dma_desc *bd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	u64		*sts_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	dma_addr_t	bd_phys, sts_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	int		sts_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	int		bd_num = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	void __iomem	*regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	tsi_debug(MAINT, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 		  "Init BDMA_%d Maintenance requests", TSI721_DMACH_MAINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	 * Initialize DMA channel for maintenance requests
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	priv->mdma.ch_id = TSI721_DMACH_MAINT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	/* Allocate space for DMA descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 				    bd_num * sizeof(struct tsi721_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 				    &bd_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (!bd_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	priv->mdma.bd_num = bd_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	priv->mdma.bd_phys = bd_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	priv->mdma.bd_base = bd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	tsi_debug(MAINT, &priv->pdev->dev, "DMA descriptors @ %p (phys = %pad)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		  bd_ptr, &bd_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	/* Allocate space for descriptor status FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 					bd_num : TSI721_DMA_MINSTSSZ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	sts_size = roundup_pow_of_two(sts_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 				     sts_size * sizeof(struct tsi721_dma_sts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 				     &sts_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	if (!sts_ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 		/* Free space allocated for DMA descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 				  bd_num * sizeof(struct tsi721_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 				  bd_ptr, bd_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		priv->mdma.bd_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	priv->mdma.sts_phys = sts_phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	priv->mdma.sts_base = sts_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	priv->mdma.sts_size = sts_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	tsi_debug(MAINT, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		"desc status FIFO @ %p (phys = %pad) size=0x%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		sts_ptr, &sts_phys, sts_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	/* Initialize DMA descriptors ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 						 TSI721_DMAC_DPTRL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 	/* Setup DMA descriptor pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	iowrite32(((u64)bd_phys >> 32),	regs + TSI721_DMAC_DPTRH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 		regs + TSI721_DMAC_DPTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	/* Setup descriptor status FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 	iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 		regs + TSI721_DMAC_DSBL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		regs + TSI721_DMAC_DSSZ);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	/* Clear interrupt bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	ioread32(regs + TSI721_DMAC_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	/* Toggle DMA channel initialization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	ioread32(regs + TSI721_DMAC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static int tsi721_bdma_maint_free(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	u32 ch_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	struct tsi721_bdma_maint *mdma = &priv->mdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	if (mdma->bd_base == NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	/* Check if DMA channel still running */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	ch_stat = ioread32(regs + TSI721_DMAC_STS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	if (ch_stat & TSI721_DMAC_STS_RUN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 		return -EFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 	/* Put DMA channel into init state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 	/* Free space allocated for DMA descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		mdma->bd_num * sizeof(struct tsi721_dma_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		mdma->bd_base, mdma->bd_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	mdma->bd_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 	/* Free space allocated for status FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) 		mdma->sts_size * sizeof(struct tsi721_dma_sts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 		mdma->sts_base, mdma->sts_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	mdma->sts_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) /* Enable Inbound Messaging Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 				  u32 inte_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	u32 rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	if (!inte_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	/* Clear pending Inbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	/* Enable Inbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	if (priv->flags & TSI721_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		return; /* Finished if we are in MSI-X mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	 * For MSI and INTA interrupt signalling we need to enable next levels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	/* Enable Device Channel Interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 		  priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /* Disable Inbound Messaging Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 				   u32 inte_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	u32 rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (!inte_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 	/* Clear pending Inbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	/* Disable Inbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 	rval &= ~inte_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	if (priv->flags & TSI721_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 		return; /* Finished if we are in MSI-X mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	 * For MSI and INTA interrupt signalling we need to disable next levels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 	/* Disable Device Channel Interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 	rval &= ~TSI721_INT_IMSG_CHAN(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) /* Enable Outbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 				  u32 inte_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	u32 rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 	if (!inte_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	/* Clear pending Outbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	/* Enable Outbound Messaging channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	if (priv->flags & TSI721_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 		return; /* Finished if we are in MSI-X mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 * For MSI and INTA interrupt signalling we need to enable next levels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	/* Enable Device Channel Interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		  priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) /* Disable Outbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) static void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 				   u32 inte_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	u32 rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	if (!inte_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	/* Clear pending Outbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	/* Disable Outbound Messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	rval &= ~inte_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	if (priv->flags & TSI721_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		return; /* Finished if we are in MSI-X mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	 * For MSI and INTA interrupt signalling we need to disable next levels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	/* Disable Device Channel Interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 	rval &= ~TSI721_INT_OMSG_CHAN(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)  * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)  * @mport: Master port with outbound message queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661)  * @rdev: Target of outbound message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662)  * @mbox: Outbound mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663)  * @buffer: Message to add to outbound queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664)  * @len: Length of message
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			void *buffer, size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	struct tsi721_omsg_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	u32 tx_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (!priv->omsg_init[mbox] ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 	    len > TSI721_MSG_MAX_SIZE || len < 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 	spin_lock_irqsave(&priv->omsg_ring[mbox].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	tx_slot = priv->omsg_ring[mbox].tx_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 	/* Copy copy message into transfer buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 	if (len & 0x7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		len += 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	/* Build descriptor associated with buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	desc = priv->omsg_ring[mbox].omd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) #ifdef TSI721_OMSG_DESC_INT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	/* Request IOF_DONE interrupt generation for each N-th frame in queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	if (tx_slot % 4 == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 	desc[tx_slot].msg_info =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			    (0xe << 12) | (len & 0xff8));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 	desc[tx_slot].bufptr_lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			    0xffffffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	desc[tx_slot].bufptr_hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 	priv->omsg_ring[mbox].wr_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	/* Go to next descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		priv->omsg_ring[mbox].tx_slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		/* Move through the ring link descriptor at the end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		priv->omsg_ring[mbox].wr_count++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	/* Set new write count value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	iowrite32(priv->omsg_ring[mbox].wr_count,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	spin_unlock_irqrestore(&priv->omsg_ring[mbox].lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728)  * tsi721_omsg_handler - Outbound Message Interrupt Handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730)  * @ch:   number of OB MSG channel to service
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732)  * Services channel interrupts from outbound messaging engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	u32 omsg_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	struct rio_mport *mport = &priv->mport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	void *dev_id = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	u32 tx_slot = 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	int do_callback = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	spin_lock(&priv->omsg_ring[ch].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		tsi_info(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 			"OB MBOX%d: Status FIFO is full", ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		u32 srd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		 * Find last successfully processed descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		/* Check and clear descriptor status FIFO entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 		sts_ptr = priv->omsg_ring[ch].sts_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 		j = srd_ptr * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 		while (sts_ptr[j]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 				prev_ptr = last_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 				last_ptr = le64_to_cpu(sts_ptr[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 				sts_ptr[j] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 			++srd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 			srd_ptr %= priv->omsg_ring[ch].sts_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 			j = srd_ptr * 8;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 		if (last_ptr == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 			goto no_sts_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 		if (!mport->outb_msg[ch].mcback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			goto no_sts_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 		/* Inform upper layer about transfer completion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 						sizeof(struct tsi721_omsg_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 		 * Check if this is a Link Descriptor (LD).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		 * If yes, ignore LD and use descriptor processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		 * before LD.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		if (tx_slot == priv->omsg_ring[ch].size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 			if (prev_ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 				tx_slot = (prev_ptr -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 					(u64)priv->omsg_ring[ch].omd_phys)/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 						sizeof(struct tsi721_omsg_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 				goto no_sts_update;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 		if (tx_slot >= priv->omsg_ring[ch].size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 			tsi_debug(OMSG, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 				  "OB_MSG tx_slot=%x > size=%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 				  tx_slot, priv->omsg_ring[ch].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 		WARN_ON(tx_slot >= priv->omsg_ring[ch].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		/* Move slot index to the next message to be sent */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		++tx_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 		if (tx_slot == priv->omsg_ring[ch].size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 			tx_slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		dev_id = priv->omsg_ring[ch].dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 		do_callback = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) no_sts_update:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		* Outbound message operation aborted due to error,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		* reinitialize OB MSG channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 		*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 		tsi_debug(OMSG, &priv->pdev->dev, "OB MSG ABORT ch_stat=%x",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 			  ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 		iowrite32(TSI721_OBDMAC_INT_ERROR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 				priv->regs + TSI721_OBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 				priv->regs + TSI721_OBDMAC_CTL(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 		/* Inform upper level to clear all pending tx slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 		dev_id = priv->omsg_ring[ch].dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		tx_slot = priv->omsg_ring[ch].tx_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		do_callback = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		/* Synch tx_slot tracking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		iowrite32(priv->omsg_ring[ch].tx_slot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 			priv->regs + TSI721_OBDMAC_DRDCNT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 		priv->omsg_ring[ch].sts_rdptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	/* Clear channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 	if (!(priv->flags & TSI721_USING_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 		u32 ch_inte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		/* Re-enable channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 		ch_inte |= TSI721_INT_OMSG_CHAN(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	spin_unlock(&priv->omsg_ring[ch].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 	if (mport->outb_msg[ch].mcback && do_callback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 		mport->outb_msg[ch].mcback(mport, dev_id, ch, tx_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867)  * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868)  * @mport: Master port implementing Outbound Messaging Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)  * @dev_id: Device specific pointer to pass on event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)  * @mbox: Mailbox to open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871)  * @entries: Number of entries in the outbound mailbox ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 				 int mbox, int entries)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	struct tsi721_omsg_desc *bd_ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	int i, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	    (entries > (TSI721_OMSGD_RING_SIZE)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	if ((mbox_sel & (1 << mbox)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 		rc = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	priv->omsg_ring[mbox].dev_id = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	priv->omsg_ring[mbox].size = entries;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	priv->omsg_ring[mbox].sts_rdptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 	spin_lock_init(&priv->omsg_ring[mbox].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	/* Outbound Msg Buffer allocation based on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 	   the number of maximum descriptor entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 	for (i = 0; i < entries; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		priv->omsg_ring[mbox].omq_base[i] =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 			dma_alloc_coherent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 				&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 				&priv->omsg_ring[mbox].omq_phys[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 				GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 			tsi_debug(OMSG, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 				  "ENOMEM for OB_MSG_%d data buffer", mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 			rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 			goto out_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 	/* Outbound message descriptor allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 				&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 				(entries + 1) * sizeof(struct tsi721_omsg_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 				&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	if (priv->omsg_ring[mbox].omd_base == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 		tsi_debug(OMSG, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 			"ENOMEM for OB_MSG_%d descriptor memory", mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		goto out_buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	priv->omsg_ring[mbox].tx_slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	/* Outbound message descriptor status FIFO allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 							    priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 							    &priv->omsg_ring[mbox].sts_phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 							    GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 	if (priv->omsg_ring[mbox].sts_base == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		tsi_debug(OMSG, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			"ENOMEM for OB_MSG_%d status FIFO", mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 		rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		goto out_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 	 * Configure Outbound Messaging Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	/* Setup Outbound Message descriptor pointer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 			priv->regs + TSI721_OBDMAC_DPTRH(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 					TSI721_OBDMAC_DPTRL_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 			priv->regs + TSI721_OBDMAC_DPTRL(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	/* Setup Outbound Message descriptor status FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 			priv->regs + TSI721_OBDMAC_DSBH(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 					TSI721_OBDMAC_DSBL_MASK),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 			priv->regs + TSI721_OBDMAC_DSBL(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	/* Enable interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 	if (priv->flags & TSI721_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 		int idx = TSI721_VECT_OMB0_DONE + mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		/* Request interrupt service if we are in MSI-X mode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 		rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 				 priv->msix[idx].irq_name, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		if (rc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 			tsi_debug(OMSG, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 				"Unable to get MSI-X IRQ for OBOX%d-DONE",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 				mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 			goto out_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		idx = TSI721_VECT_OMB0_INT + mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		rc = request_irq(priv->msix[idx].vector, tsi721_omsg_msix, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 				 priv->msix[idx].irq_name, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		if (rc)	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 			tsi_debug(OMSG, &priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 				"Unable to get MSI-X IRQ for MBOX%d-INT", mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 			idx = TSI721_VECT_OMB0_DONE + mbox;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 			free_irq(priv->msix[idx].vector, (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 			goto out_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) #endif /* CONFIG_PCI_MSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	/* Initialize Outbound Message descriptors ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	bd_ptr = priv->omsg_ring[mbox].omd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	bd_ptr[entries].msg_info = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	bd_ptr[entries].next_lo =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 		TSI721_OBDMAC_DPTRL_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	bd_ptr[entries].next_hi =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	priv->omsg_ring[mbox].wr_count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	/* Initialize Outbound Message engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	iowrite32(TSI721_OBDMAC_CTL_RETRY_THR | TSI721_OBDMAC_CTL_INIT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		  priv->regs + TSI721_OBDMAC_CTL(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	udelay(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	priv->omsg_init[mbox] = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) out_stat:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		priv->omsg_ring[mbox].sts_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 		priv->omsg_ring[mbox].sts_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	priv->omsg_ring[mbox].sts_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) #endif /* CONFIG_PCI_MSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) out_desc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		(entries + 1) * sizeof(struct tsi721_omsg_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		priv->omsg_ring[mbox].omd_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		priv->omsg_ring[mbox].omd_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	priv->omsg_ring[mbox].omd_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) out_buf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		if (priv->omsg_ring[mbox].omq_base[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 			dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 				TSI721_MSG_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 				priv->omsg_ring[mbox].omq_base[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 				priv->omsg_ring[mbox].omq_phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 			priv->omsg_ring[mbox].omq_base[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050)  * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)  * @mport: Master port implementing the outbound message unit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052)  * @mbox: Mailbox to close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	if (!priv->omsg_init[mbox])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	priv->omsg_init[mbox] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 	/* Disable Interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	if (priv->flags & TSI721_USING_MSIX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 			 (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 			 (void *)priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) #endif /* CONFIG_PCI_MSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	/* Free OMSG Descriptor Status FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 		priv->omsg_ring[mbox].sts_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 		priv->omsg_ring[mbox].sts_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 	priv->omsg_ring[mbox].sts_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	/* Free OMSG descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 		(priv->omsg_ring[mbox].size + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 			sizeof(struct tsi721_omsg_desc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 		priv->omsg_ring[mbox].omd_base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 		priv->omsg_ring[mbox].omd_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 	priv->omsg_ring[mbox].omd_base = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	/* Free message buffers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 		if (priv->omsg_ring[mbox].omq_base[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 			dma_free_coherent(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 				TSI721_MSG_BUFFER_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 				priv->omsg_ring[mbox].omq_base[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 				priv->omsg_ring[mbox].omq_phys[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 			priv->omsg_ring[mbox].omq_base[i] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)  * tsi721_imsg_handler - Inbound Message Interrupt Handler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109)  * @ch: inbound message channel number to service
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)  * Services channel interrupts from inbound messaging engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	u32 mbox = ch - 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	u32 imsg_int;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	struct rio_mport *mport = &priv->mport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	spin_lock(&priv->imsg_ring[mbox].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	if (imsg_int & TSI721_IBDMAC_INT_SRTO)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 		tsi_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout", mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 		tsi_info(&priv->pdev->dev, "IB MBOX%d PCIe error", mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 		tsi_info(&priv->pdev->dev, "IB MBOX%d IB free queue low", mbox);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	/* Clear IB channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	/* If an IB Msg is received notify the upper layer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 	if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 		mport->inb_msg[mbox].mcback)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 		mport->inb_msg[mbox].mcback(mport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 				priv->imsg_ring[mbox].dev_id, mbox, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	if (!(priv->flags & TSI721_USING_MSIX)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		u32 ch_inte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) 		/* Re-enable channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 		ch_inte |= TSI721_INT_IMSG_CHAN(ch);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) 		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	spin_unlock(&priv->imsg_ring[mbox].lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154)  * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155)  * @mport: Master port implementing the Inbound Messaging Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156)  * @dev_id: Device specific pointer to pass on event
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157)  * @mbox: Mailbox to open
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158)  * @entries: Number of entries in the inbound mailbox ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159)  */
static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
				int mbox, int entries)
{
	struct tsi721_device *priv = mport->priv;
	int ch = mbox + 4;	/* IB messaging uses HW channels 4..7 */
	int i;
	u64 *free_ptr;
	int rc = 0;

	/* Ring size must be a power of 2 within the device-supported range */
	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
	    (entries > TSI721_IMSGD_RING_SIZE) ||
	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
		rc = -EINVAL;
		goto out;
	}

	/* Reject mailboxes that were not enabled via the mbox_sel mask */
	if ((mbox_sel & (1 << mbox)) == 0) {
		rc = -ENODEV;
		goto out;
	}

	/* Initialize IB Messaging Ring */
	priv->imsg_ring[mbox].dev_id = dev_id;
	priv->imsg_ring[mbox].size = entries;
	priv->imsg_ring[mbox].rx_slot = 0;
	priv->imsg_ring[mbox].desc_rdptr = 0;
	priv->imsg_ring[mbox].fq_wrptr = 0;
	/* Start with an empty client-buffer queue (filled by add_inb_buffer) */
	for (i = 0; i < priv->imsg_ring[mbox].size; i++)
		priv->imsg_ring[mbox].imq_base[i] = NULL;
	spin_lock_init(&priv->imsg_ring[mbox].lock);

	/* Allocate buffers for incoming messages */
	priv->imsg_ring[mbox].buf_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * TSI721_MSG_BUFFER_SIZE,
				   &priv->imsg_ring[mbox].buf_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].buf_base == NULL) {
		tsi_err(&priv->pdev->dev,
			"Failed to allocate buffers for IB MBOX%d", mbox);
		rc = -ENOMEM;
		goto out;
	}

	/* Allocate memory for circular free list (one 64-bit PA per entry) */
	priv->imsg_ring[mbox].imfq_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * 8,
				   &priv->imsg_ring[mbox].imfq_phys,
				   GFP_KERNEL);

	if (priv->imsg_ring[mbox].imfq_base == NULL) {
		tsi_err(&priv->pdev->dev,
			"Failed to allocate free queue for IB MBOX%d", mbox);
		rc = -ENOMEM;
		goto out_buf;
	}

	/* Allocate memory for Inbound message descriptors */
	priv->imsg_ring[mbox].imd_base =
		dma_alloc_coherent(&priv->pdev->dev,
				   entries * sizeof(struct tsi721_imsg_desc),
				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);

	if (priv->imsg_ring[mbox].imd_base == NULL) {
		tsi_err(&priv->pdev->dev,
			"Failed to allocate descriptor memory for IB MBOX%d",
			mbox);
		rc = -ENOMEM;
		goto out_dma;
	}

	/*
	 * Fill free buffer pointer list.
	 * NOTE(review): the 0x1000 stride presumably equals
	 * TSI721_MSG_BUFFER_SIZE (the per-buffer allocation unit above) -
	 * confirm against tsi721.h before changing either constant.
	 */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	for (i = 0; i < entries; i++)
		free_ptr[i] = cpu_to_le64(
				(u64)(priv->imsg_ring[mbox].buf_phys) +
				i * 0x1000);

	/* Ensure the free list is visible to the device before enabling it */
	mb();

	/*
	 * For mapping of inbound SRIO Messages into appropriate queues we need
	 * to set Inbound Device ID register in the messaging engine. We do it
	 * once when first inbound mailbox is requested.
	 */
	if (!(priv->flags & TSI721_IMSGID_SET)) {
		iowrite32((u32)priv->mport.host_deviceid,
			priv->regs + TSI721_IB_DEVID);
		priv->flags |= TSI721_IMSGID_SET;
	}

	/*
	 * Configure Inbound Messaging channel (ch = mbox + 4)
	 */

	/* Setup Inbound Message free queue (base address high/low + size) */
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
		priv->regs + TSI721_IBDMAC_FQBH(ch));
	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
			TSI721_IBDMAC_FQBL_MASK),
		priv->regs+TSI721_IBDMAC_FQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_FQSZ(ch));

	/* Setup Inbound Message descriptor queue */
	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
		priv->regs + TSI721_IBDMAC_DQBH(ch));
	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
		   (u32)TSI721_IBDMAC_DQBL_MASK),
		priv->regs+TSI721_IBDMAC_DQBL(ch));
	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
		priv->regs + TSI721_IBDMAC_DQSZ(ch));

	/* Enable interrupts */

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		int idx = TSI721_VECT_IMB0_RCV + mbox;

		/* Request interrupt service if we are in MSI-X mode */
		rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
				 priv->msix[idx].irq_name, (void *)priv);

		if (rc) {
			tsi_debug(IMSG, &priv->pdev->dev,
				"Unable to get MSI-X IRQ for IBOX%d-DONE",
				mbox);
			goto out_desc;
		}

		/* Second vector: channel error/status interrupts */
		idx = TSI721_VECT_IMB0_INT + mbox;
		rc = request_irq(priv->msix[idx].vector, tsi721_imsg_msix, 0,
				 priv->msix[idx].irq_name, (void *)priv);

		if (rc)	{
			tsi_debug(IMSG, &priv->pdev->dev,
				"Unable to get MSI-X IRQ for IBOX%d-INT", mbox);
			/* Roll back the RCV vector requested above */
			free_irq(
				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)priv);
			goto out_desc;
		}
	}
#endif /* CONFIG_PCI_MSI */

	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);

	/* Initialize Inbound Message Engine */
	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
	/* Read back to flush posted write, then give HW time to initialize */
	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
	udelay(10);
	/* Hand all but one free-queue entry to the hardware */
	priv->imsg_ring[mbox].fq_wrptr = entries - 1;
	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));

	priv->imsg_init[mbox] = 1;
	return 0;

	/* Error unwind: free in reverse order of allocation */
#ifdef CONFIG_PCI_MSI
out_desc:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
#endif /* CONFIG_PCI_MSI */

out_dma:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

out_buf:
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

out:
	return rc;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350)  * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351)  * @mport: Master port implementing the Inbound Messaging Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352)  * @mbox: Mailbox to close
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353)  */
static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	u32 rx_slot;
	int ch = mbox + 4;	/* IB messaging uses HW channels 4..7 */

	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
		return;
	priv->imsg_init[mbox] = 0;

	/* Disable Inbound Messaging Engine */

	/*
	 * Disable Interrupts.
	 * NOTE(review): this passes the outbound mask TSI721_OBDMAC_INT_MASK
	 * to the inbound-channel disable helper; verify that it covers the
	 * same bits as TSI721_IBDMAC_INT_MASK (used when the mbox was opened).
	 */
	tsi721_imsg_interrupt_disable(priv, ch, TSI721_OBDMAC_INT_MASK);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		/* Release both MSI-X vectors requested by open_inb_mbox() */
		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
				(void *)priv);
		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
				(void *)priv);
	}
#endif /* CONFIG_PCI_MSI */

	/* Clear Inbound Buffer Queue (drop references to client buffers) */
	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	/* Free memory allocated for message buffers */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
		priv->imsg_ring[mbox].buf_base,
		priv->imsg_ring[mbox].buf_phys);

	priv->imsg_ring[mbox].buf_base = NULL;

	/* Free memory allocated for free pointer list */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * 8,
		priv->imsg_ring[mbox].imfq_base,
		priv->imsg_ring[mbox].imfq_phys);

	priv->imsg_ring[mbox].imfq_base = NULL;

	/* Free memory allocated for RX descriptors */
	dma_free_coherent(&priv->pdev->dev,
		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
		priv->imsg_ring[mbox].imd_base,
		priv->imsg_ring[mbox].imd_phys);

	priv->imsg_ring[mbox].imd_base = NULL;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 
/**
 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Returns %0 on success or %-EINVAL if the current rx slot already
 * holds a buffer.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 	u32 rx_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 	int rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 	rx_slot = priv->imsg_ring[mbox].rx_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 		tsi_err(&priv->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 			"Error adding inbound buffer %d, buffer exists",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 			rx_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 		rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 		priv->imsg_ring[mbox].rx_slot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438)  * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439)  * @mport: Master port implementing the Inbound Messaging Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440)  * @mbox: Inbound mailbox number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442)  * Returns pointer to the message on success or NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443)  */
static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
{
	struct tsi721_device *priv = mport->priv;
	struct tsi721_imsg_desc *desc;
	u32 rx_slot;
	void *rx_virt = NULL;
	u64 rx_phys;
	void *buf = NULL;
	u64 *free_ptr;
	int ch = mbox + 4;	/* IB messaging uses HW channels 4..7 */
	int msg_size;

	if (!priv->imsg_init[mbox])
		return NULL;

	/* Look at the descriptor at the current read pointer */
	desc = priv->imsg_ring[mbox].imd_base;
	desc += priv->imsg_ring[mbox].desc_rdptr;

	/* HO (handled by owner) bit set means the HW delivered a message */
	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
		goto out;

	/*
	 * Find the next occupied client-buffer slot.
	 * NOTE(review): this loop does not terminate if no buffer was queued
	 * via add_inb_buffer() - presumably callers guarantee at least one;
	 * confirm against the mport client contract.
	 */
	rx_slot = priv->imsg_ring[mbox].rx_slot;
	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
		if (++rx_slot == priv->imsg_ring[mbox].size)
			rx_slot = 0;
	}

	/* Recover the DMA address of the received message from the descriptor */
	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
			le32_to_cpu(desc->bufptr_lo);

	/* Translate it to a CPU address within the coherent buffer pool */
	rx_virt = priv->imsg_ring[mbox].buf_base +
		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);

	buf = priv->imsg_ring[mbox].imq_base[rx_slot];
	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
	/* A zero byte count encodes a maximum-size message */
	if (msg_size == 0)
		msg_size = RIO_MAX_MSG_SIZE;

	/* Copy message into the client buffer and release the slot */
	memcpy(buf, rx_virt, msg_size);
	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;

	/* Return descriptor ownership to HW and advance the read pointer */
	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].desc_rdptr = 0;

	iowrite32(priv->imsg_ring[mbox].desc_rdptr,
		priv->regs + TSI721_IBDMAC_DQRP(ch));

	/* Return free buffer into the pointer list */
	free_ptr = priv->imsg_ring[mbox].imfq_base;
	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);

	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
		priv->imsg_ring[mbox].fq_wrptr = 0;

	/* Tell the HW about the recycled free-queue entry */
	iowrite32(priv->imsg_ring[mbox].fq_wrptr,
		priv->regs + TSI721_IBDMAC_FQWP(ch));
out:
	return buf;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506)  * tsi721_messages_init - Initialization of Messaging Engine
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509)  * Configures Tsi721 messaging engine.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) static int tsi721_messages_init(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 	int	ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 	/* Set SRIO Message Request/Response Timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 	/* Initialize Inbound Messaging Engine Registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 		/* Clear interrupt bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 		iowrite32(TSI721_IBDMAC_INT_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 			priv->regs + TSI721_IBDMAC_INT(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		/* Clear Status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 				priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 				priv->regs + TSI721_SMSG_ECC_NCOR(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 
/**
 * tsi721_query_mport - Fetch attributes of the Tsi721 master port
 * @mport: Master port to query
 * @attr: mport attributes structure to fill in
 *
 * Reports link state (speed and width) and, when the RapidIO DMA engine
 * is configured, the DMA capabilities of the port. Returns 0 on success.
 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) static int tsi721_query_mport(struct rio_mport *mport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 			      struct rio_mport_attr *attr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 	struct tsi721_device *priv = mport->priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	u32 rval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 	rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_ERR_STS_CSR(0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	if (rval & RIO_PORT_N_ERR_STS_PORT_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL2_CSR(0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		attr->link_speed = (rval & RIO_PORT_N_CTL2_SEL_BAUD) >> 28;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 		rval = ioread32(priv->regs + 0x100 + RIO_PORT_N_CTL_CSR(0, 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 		attr->link_width = (rval & RIO_PORT_N_CTL_IPW) >> 27;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		attr->link_speed = RIO_LINK_DOWN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) #ifdef CONFIG_RAPIDIO_DMA_ENGINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 	attr->flags = RIO_MPORT_DMA | RIO_MPORT_DMA_SG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	attr->dma_max_sge = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 	attr->dma_max_size = TSI721_BDMA_MAX_BCOUNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	attr->dma_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	attr->flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573)  * tsi721_disable_ints - disables all device interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) static void tsi721_disable_ints(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 	int ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 	/* Disable all device level interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 	iowrite32(0, priv->regs + TSI721_DEV_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	/* Disable all Device Channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 	/* Disable all Inbound Msg Channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	/* Disable all Outbound Msg Channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) 		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) 	/* Disable all general messaging interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	iowrite32(0, priv->regs + TSI721_SMSG_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	/* Disable all BDMA Channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) 	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 		iowrite32(0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) 			priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	/* Disable all general BDMA interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 	iowrite32(0, priv->regs + TSI721_BDMA_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 	/* Disable all SRIO Channel interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) 	/* Disable all general SR2PC interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) 	/* Disable all PC2SR interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	iowrite32(0, priv->regs + TSI721_PC2SR_INTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 	/* Disable all I2C interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 	/* Disable SRIO MAC interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) static struct rio_ops tsi721_rio_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	.lcread			= tsi721_lcread,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 	.lcwrite		= tsi721_lcwrite,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	.cread			= tsi721_cread_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	.cwrite			= tsi721_cwrite_dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	.dsend			= tsi721_dsend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 	.open_inb_mbox		= tsi721_open_inb_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	.close_inb_mbox		= tsi721_close_inb_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) 	.open_outb_mbox		= tsi721_open_outb_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 	.close_outb_mbox	= tsi721_close_outb_mbox,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) 	.add_outb_message	= tsi721_add_outb_message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) 	.add_inb_buffer		= tsi721_add_inb_buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) 	.get_inb_message	= tsi721_get_inb_message,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	.map_inb		= tsi721_rio_map_inb_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	.unmap_inb		= tsi721_rio_unmap_inb_mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	.pwenable		= tsi721_pw_enable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 	.query_mport		= tsi721_query_mport,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	.map_outb		= tsi721_map_outb_win,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 	.unmap_outb		= tsi721_unmap_outb_win,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) static void tsi721_mport_release(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) 	struct rio_mport *mport = to_rio_mport(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) 	tsi_debug(EXIT, dev, "%s id=%d", mport->name, mport->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652)  * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653)  * @priv: pointer to tsi721 private data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655)  * Configures Tsi721 as RapidIO master port.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) static int tsi721_setup_mport(struct tsi721_device *priv)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 	struct pci_dev *pdev = priv->pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	int err = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	struct rio_mport *mport = &priv->mport;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 	err = rio_mport_initialize(mport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 	mport->ops = &tsi721_rio_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 	mport->index = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 	mport->sys_size = 0; /* small system */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 	mport->priv = (void *)priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 	mport->phys_efptr = 0x100;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 	mport->phys_rmap = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 	mport->dev.parent = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 	mport->dev.release = tsi721_mport_release;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 	INIT_LIST_HEAD(&mport->dbells);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) 	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) 	snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 		 dev_driver_string(&pdev->dev), dev_name(&pdev->dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	/* Hook up interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) 	if (!tsi721_enable_msix(priv))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 		priv->flags |= TSI721_USING_MSIX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	else if (!pci_enable_msi(pdev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 		priv->flags |= TSI721_USING_MSI;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		tsi_debug(MPORT, &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 			 "MSI/MSI-X is not available. Using legacy INTx.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) #endif /* CONFIG_PCI_MSI */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 	err = tsi721_request_irq(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 		tsi_err(&pdev->dev, "Unable to get PCI IRQ %02X (err=0x%x)",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 			pdev->irq, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) 		return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) #ifdef CONFIG_RAPIDIO_DMA_ENGINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	err = tsi721_register_dma(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		goto err_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) 	/* Enable SRIO link */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) 		  TSI721_DEVCTL_SRBOOT_CMPL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) 		  priv->regs + TSI721_DEVCTL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 	if (mport->host_deviceid >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 			  RIO_PORT_GEN_DISCOVERED,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 			  priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 	err = rio_register_mport(mport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 		tsi721_unregister_dma(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 		goto err_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) err_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 	tsi721_free_irq(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) static int tsi721_probe(struct pci_dev *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 				  const struct pci_device_id *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) 	struct tsi721_device *priv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) 	priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	if (!priv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) 		goto err_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) 	err = pci_enable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) 		tsi_err(&pdev->dev, "Failed to enable PCI device");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) 		goto err_clean;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) 	priv->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) #ifdef DEBUG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758) 		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) 			tsi_debug(INIT, &pdev->dev, "res%d %pR",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) 				  i, &pdev->resource[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) 	 * Verify BAR configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) 	/* BAR_0 (registers) must be 512KB+ in 32-bit address space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) 	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) 	    pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) 	    pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772) 		tsi_err(&pdev->dev, "Missing or misconfigured CSR BAR0");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) 		goto err_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) 	/* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) 	if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779) 	    pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) 	    pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) 		tsi_err(&pdev->dev, "Missing or misconfigured Doorbell BAR1");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782) 		err = -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) 		goto err_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) 	 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) 	 * space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789) 	 * NOTE: BAR_2 and BAR_4 are not used by this version of driver.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) 	 * It may be a good idea to keep them disabled using HW configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) 	 * to save PCI memory space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) 	priv->p2r_bar[0].size = priv->p2r_bar[1].size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) 	if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797) 		if (pci_resource_flags(pdev, BAR_2) & IORESOURCE_PREFETCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) 			tsi_debug(INIT, &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) 				 "Prefetchable OBW BAR2 will not be used");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801) 			priv->p2r_bar[0].base = pci_resource_start(pdev, BAR_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) 			priv->p2r_bar[0].size = pci_resource_len(pdev, BAR_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) 	if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) 		if (pci_resource_flags(pdev, BAR_4) & IORESOURCE_PREFETCH)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) 			tsi_debug(INIT, &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) 				 "Prefetchable OBW BAR4 will not be used");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) 		else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) 			priv->p2r_bar[1].base = pci_resource_start(pdev, BAR_4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2812) 			priv->p2r_bar[1].size = pci_resource_len(pdev, BAR_4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2813) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2816) 	err = pci_request_regions(pdev, DRV_NAME);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2817) 	if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) 		tsi_err(&pdev->dev, "Unable to obtain PCI resources");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) 		goto err_disable_pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2820) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2821) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2822) 	pci_set_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2824) 	priv->regs = pci_ioremap_bar(pdev, BAR_0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2825) 	if (!priv->regs) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) 		tsi_err(&pdev->dev, "Unable to map device registers space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) 		goto err_free_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) 	priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) 	if (!priv->odb_base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) 		tsi_err(&pdev->dev, "Unable to map outbound doorbells space");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) 		goto err_unmap_bars;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) 	/* Configure DMA attributes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) 	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) 			tsi_err(&pdev->dev, "Unable to set DMA mask");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) 			goto err_unmap_bars;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) 		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847) 			tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) 		if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) 			tsi_info(&pdev->dev, "Unable to set consistent DMA mask");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) 	BUG_ON(!pci_is_pcie(pdev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) 	/* Clear "no snoop" and "relaxed ordering" bits. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) 		PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) 	/* Override PCIe Maximum Read Request Size setting if requested */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861) 	if (pcie_mrrs >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) 		if (pcie_mrrs <= 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) 			pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) 					PCI_EXP_DEVCTL_READRQ, pcie_mrrs << 12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) 			tsi_info(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) 				 "Invalid MRRS override value %d", pcie_mrrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) 	/* Set PCIe completion timeout to 1-10ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) 	pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872) 					   PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) 	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) 	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) 	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) 						TSI721_MSIXTBL_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) 	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) 						TSI721_MSIXPBA_OFFSET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882) 	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) 	/* End of FIXUP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) 	tsi721_disable_ints(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) 	tsi721_init_pc2sr_mapping(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) 	tsi721_init_sr2pc_mapping(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2890) 	if (tsi721_bdma_maint_init(priv)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) 		tsi_err(&pdev->dev, "BDMA initialization failed");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) 		err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) 		goto err_unmap_bars;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) 	err = tsi721_doorbell_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) 		goto err_free_bdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900) 	tsi721_port_write_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) 	err = tsi721_messages_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) 		goto err_free_consistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) 	err = tsi721_setup_mport(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) 	if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) 		goto err_free_consistent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) 	pci_set_drvdata(pdev, priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) 	tsi721_interrupts_init(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) err_free_consistent:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) 	tsi721_port_write_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) 	tsi721_doorbell_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) err_free_bdma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) 	tsi721_bdma_maint_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) err_unmap_bars:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921) 	if (priv->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) 		iounmap(priv->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) 	if (priv->odb_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) 		iounmap(priv->odb_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) err_free_res:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) 	pci_clear_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) err_disable_pdev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930) err_clean:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) 	kfree(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) err_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933) 	return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) static void tsi721_remove(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) 	struct tsi721_device *priv = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) 	tsi_debug(EXIT, &pdev->dev, "enter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) 	tsi721_disable_ints(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) 	tsi721_free_irq(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) 	flush_scheduled_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) 	rio_unregister_mport(&priv->mport);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) 	tsi721_unregister_dma(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) 	tsi721_bdma_maint_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) 	tsi721_doorbell_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) 	tsi721_port_write_free(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) 	tsi721_close_sr2pc_mapping(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) 	if (priv->regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) 		iounmap(priv->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2955) 	if (priv->odb_base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2956) 		iounmap(priv->odb_base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) #ifdef CONFIG_PCI_MSI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2958) 	if (priv->flags & TSI721_USING_MSIX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2959) 		pci_disable_msix(priv->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) 	else if (priv->flags & TSI721_USING_MSI)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) 		pci_disable_msi(priv->pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) 	pci_release_regions(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) 	pci_clear_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) 	pci_set_drvdata(pdev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) 	kfree(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) 	tsi_debug(EXIT, &pdev->dev, "exit");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) static void tsi721_shutdown(struct pci_dev *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) 	struct tsi721_device *priv = pci_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) 	tsi_debug(EXIT, &pdev->dev, "enter");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) 	tsi721_disable_ints(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978) 	tsi721_dma_stop_all(priv);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) 	pci_clear_master(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980) 	pci_disable_device(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) static const struct pci_device_id tsi721_pci_tbl[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) 	{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) 	{ 0, }	/* terminate list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) static struct pci_driver tsi721_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991) 	.name		= "tsi721",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) 	.id_table	= tsi721_pci_tbl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993) 	.probe		= tsi721_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) 	.remove		= tsi721_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) 	.shutdown	= tsi721_shutdown,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) module_pci_driver(tsi721_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001) MODULE_AUTHOR("Integrated Device Technology, Inc.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) MODULE_LICENSE("GPL");