Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the OrangePi 5/5B/5+ boards.

Source listing of drivers/dma/plx_dma.c, the PLX ExpressLane PEX switch DMA engine driver:

// SPDX-License-Identifier: GPL-2.0
/*
 * PLX ExpressLane PEX PCI Switch DMA Engine Driver
 * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com>
 * Copyright (c) 2019, GigaIO Networks, Inc
 */

#include "dmaengine.h"

#include <linux/circ_buf.h>
#include <linux/dmaengine.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Logan Gunthorpe");

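/*
 * Register offsets below are relative to the DMA channel's register
 * block in BAR 0, which plx_dma_create() picks up from
 * pcim_iomap_table() after the probe routine maps it.
 */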
#define PLX_REG_DESC_RING_ADDR			0x214
#define PLX_REG_DESC_RING_ADDR_HI		0x218
#define PLX_REG_DESC_RING_NEXT_ADDR		0x21C
#define PLX_REG_DESC_RING_COUNT			0x220
#define PLX_REG_DESC_RING_LAST_ADDR		0x224
#define PLX_REG_DESC_RING_LAST_SIZE		0x228
#define PLX_REG_PREF_LIMIT			0x234
#define PLX_REG_CTRL				0x238
#define PLX_REG_CTRL2				0x23A
#define PLX_REG_INTR_CTRL			0x23C
#define PLX_REG_INTR_STATUS			0x23E

#define PLX_REG_PREF_LIMIT_PREF_FOUR		8

#define PLX_REG_CTRL_GRACEFUL_PAUSE		BIT(0)
#define PLX_REG_CTRL_ABORT			BIT(1)
#define PLX_REG_CTRL_WRITE_BACK_EN		BIT(2)
#define PLX_REG_CTRL_START			BIT(3)
#define PLX_REG_CTRL_RING_STOP_MODE		BIT(4)
#define PLX_REG_CTRL_DESC_MODE_BLOCK		(0 << 5)
#define PLX_REG_CTRL_DESC_MODE_ON_CHIP		(1 << 5)
#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP		(2 << 5)
#define PLX_REG_CTRL_DESC_INVALID		BIT(8)
#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE	BIT(9)
#define PLX_REG_CTRL_ABORT_DONE			BIT(10)
#define PLX_REG_CTRL_IMM_PAUSE_DONE		BIT(12)
#define PLX_REG_CTRL_IN_PROGRESS		BIT(30)

#define PLX_REG_CTRL_RESET_VAL	(PLX_REG_CTRL_DESC_INVALID | \
				 PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \
				 PLX_REG_CTRL_ABORT_DONE | \
				 PLX_REG_CTRL_IMM_PAUSE_DONE)

#define PLX_REG_CTRL_START_VAL	(PLX_REG_CTRL_WRITE_BACK_EN | \
				 PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \
				 PLX_REG_CTRL_START | \
				 PLX_REG_CTRL_RESET_VAL)

#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B		0
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B	1
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B	2
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B	3
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB		4
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB		5
#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B		7

#define PLX_REG_INTR_CTRL_ERROR_EN		BIT(0)
#define PLX_REG_INTR_CTRL_INV_DESC_EN		BIT(1)
#define PLX_REG_INTR_CTRL_ABORT_DONE_EN		BIT(3)
#define PLX_REG_INTR_CTRL_PAUSE_DONE_EN		BIT(4)
#define PLX_REG_INTR_CTRL_IMM_PAUSE_DONE_EN	BIT(5)

#define PLX_REG_INTR_STATUS_ERROR		BIT(0)
#define PLX_REG_INTR_STATUS_INV_DESC		BIT(1)
#define PLX_REG_INTR_STATUS_DESC_DONE		BIT(2)
#define PLX_REG_INTR_STATUS_ABORT_DONE		BIT(3)

struct plx_dma_hw_std_desc {
	__le32 flags_and_size;
	__le16 dst_addr_hi;
	__le16 src_addr_hi;
	__le32 dst_addr_lo;
	__le32 src_addr_lo;
};

#define PLX_DESC_SIZE_MASK		0x7ffffff
#define PLX_DESC_FLAG_VALID		BIT(31)
#define PLX_DESC_FLAG_INT_WHEN_DONE	BIT(30)

#define PLX_DESC_WB_SUCCESS		BIT(30)
#define PLX_DESC_WB_RD_FAIL		BIT(29)
#define PLX_DESC_WB_WR_FAIL		BIT(28)

#define PLX_DMA_RING_COUNT		2048

struct plx_dma_desc {
	struct dma_async_tx_descriptor txd;
	struct plx_dma_hw_std_desc *hw;
	u32 orig_size;
};

struct plx_dma_dev {
	struct dma_device dma_dev;
	struct dma_chan dma_chan;
	struct pci_dev __rcu *pdev;
	void __iomem *bar;
	struct tasklet_struct desc_task;

	spinlock_t ring_lock;
	bool ring_active;
	int head;
	int tail;
	struct plx_dma_hw_std_desc *hw_ring;
	dma_addr_t hw_ring_dma;
	struct plx_dma_desc **desc_ring;
};

static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c)
{
	return container_of(c, struct plx_dma_dev, dma_chan);
}

static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct plx_dma_desc, txd);
}

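/*
 * PLX_DMA_RING_COUNT is a power of two, so the free-running head and
 * tail counters can be reduced to a ring slot with a simple mask.
 */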
static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i)
{
	return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)];
}

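/*
 * Reap completed descriptors: walk from tail towards head and stop at
 * the first descriptor the hardware still marks valid.  For each
 * finished descriptor, translate the write-back flags into a dmaengine
 * result, complete the cookie and invoke the client's callback.
 */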
static void plx_dma_process_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;
	u32 flags;

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size));

		if (flags & PLX_DESC_FLAG_VALID)
			break;

		res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK);

		if (flags & PLX_DESC_WB_SUCCESS)
			res.result = DMA_TRANS_NOERROR;
		else if (flags & PLX_DESC_WB_WR_FAIL)
			res.result = DMA_TRANS_WRITE_FAILED;
		else
			res.result = DMA_TRANS_READ_FAILED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}

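/*
 * Flush whatever the hardware already finished, then fail every
 * descriptor still outstanding with DMA_TRANS_ABORTED.  Callers stop
 * the engine first, so nothing new can complete underneath us.
 */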
static void plx_dma_abort_desc(struct plx_dma_dev *plxdev)
{
	struct dmaengine_result res;
	struct plx_dma_desc *desc;

	plx_dma_process_desc(plxdev);

	spin_lock_bh(&plxdev->ring_lock);

	while (plxdev->tail != plxdev->head) {
		desc = plx_dma_get_desc(plxdev, plxdev->tail);

		res.residue = desc->orig_size;
		res.result = DMA_TRANS_ABORTED;

		dma_cookie_complete(&desc->txd);
		dma_descriptor_unmap(&desc->txd);
		dmaengine_desc_get_callback_invoke(&desc->txd, &res);
		desc->txd.callback = NULL;
		desc->txd.callback_result = NULL;

		plxdev->tail++;
	}

	spin_unlock_bh(&plxdev->ring_lock);
}

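/*
 * Ask the engine for a graceful pause and give it up to a second to
 * acknowledge before complaining, then clear the descriptor ring
 * registers.  The caller must ensure the PCI device is still present.
 */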
static void __plx_dma_stop(struct plx_dma_dev *plxdev)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
	u32 val;

	val = readl(plxdev->bar + PLX_REG_CTRL);
	if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE))
		return;

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	while (!time_after(jiffies, timeout)) {
		val = readl(plxdev->bar + PLX_REG_CTRL);
		if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)
			break;

		cpu_relax();
	}

	if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE))
		dev_err(plxdev->dma_dev.dev,
			"Timeout waiting for graceful pause!\n");

	writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE,
	       plxdev->bar + PLX_REG_CTRL);

	writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
}

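/*
 * plxdev->pdev is RCU-protected: plx_dma_remove() NULLs it and then
 * waits a grace period, so holding the RCU read lock across the
 * register access keeps the device from vanishing mid-way.
 */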
static void plx_dma_stop(struct plx_dma_dev *plxdev)
{
	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	__plx_dma_stop(plxdev);

	rcu_read_unlock();
}

static void plx_dma_desc_task(struct tasklet_struct *t)
{
	struct plx_dma_dev *plxdev = from_tasklet(plxdev, t, desc_task);

	plx_dma_process_desc(plxdev);
}

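/*
 * Note the unusual locking: on success this returns with ring_lock
 * still held (hence the __acquires() annotation); the matching unlock
 * happens in plx_dma_tx_submit(), so the claimed ring slot cannot be
 * reordered against other submitters in between.
 */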
static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c,
		dma_addr_t dma_dst, dma_addr_t dma_src, size_t len,
		unsigned long flags)
	__acquires(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c);
	struct plx_dma_desc *plxdesc;

	spin_lock_bh(&plxdev->ring_lock);
	if (!plxdev->ring_active)
		goto err_unlock;

	if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT))
		goto err_unlock;

	if (len > PLX_DESC_SIZE_MASK)
		goto err_unlock;

	plxdesc = plx_dma_get_desc(plxdev, plxdev->head);
	plxdev->head++;

	plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst));
	plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst));
	plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src));
	plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src));

	plxdesc->orig_size = len;

	if (flags & DMA_PREP_INTERRUPT)
		len |= PLX_DESC_FLAG_INT_WHEN_DONE;

	plxdesc->hw->flags_and_size = cpu_to_le32(len);
	plxdesc->txd.flags = flags;

	/* return with the lock held, it will be released in tx_submit */

	return &plxdesc->txd;

err_unlock:
	/*
	 * Keep sparse happy by restoring an even lock count on
	 * this lock.
	 */
	__acquire(plxdev->ring_lock);

	spin_unlock_bh(&plxdev->ring_lock);
	return NULL;
}

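/*
 * Second half of the prep/submit pair: assign the cookie, make the
 * descriptor contents globally visible, set the valid bit that hands
 * the descriptor to the hardware, then drop the ring_lock taken in
 * plx_dma_prep_memcpy().
 */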
static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc)
	__releases(plxdev->ring_lock)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan);
	struct plx_dma_desc *plxdesc = to_plx_desc(desc);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(desc);

	/*
	 * Ensure the descriptor updates are visible to the dma device
	 * before setting the valid bit.
	 */
	wmb();

	plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID);

	spin_unlock_bh(&plxdev->ring_lock);

	return cookie;
}

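/*
 * Completion status with a poll-friendly twist: if the cookie is not
 * complete yet, reap any descriptors the hardware has finished and
 * check again, so pollers make progress without waiting for the
 * completion interrupt.
 */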
static enum dma_status plx_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	plx_dma_process_desc(plxdev);

	return dma_cookie_status(chan, cookie, txstate);
}

static void plx_dma_issue_pending(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Ensure the valid bits are visible before starting the
	 * DMA engine.
	 */
	wmb();

	writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL);

	rcu_read_unlock();
}

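/*
 * Interrupt handler: read the status register, defer descriptor
 * completion work to the tasklet, and write the status bits back to
 * acknowledge them.
 */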
static irqreturn_t plx_dma_isr(int irq, void *devid)
{
	struct plx_dma_dev *plxdev = devid;
	u32 status;

	status = readw(plxdev->bar + PLX_REG_INTR_STATUS);

	if (!status)
		return IRQ_NONE;

	if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active)
		tasklet_schedule(&plxdev->desc_task);

	writew(status, plxdev->bar + PLX_REG_INTR_STATUS);

	return IRQ_HANDLED;
}

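/*
 * Allocate the array of software descriptors and point each one at its
 * slot in the coherent hardware ring allocated by the caller.
 */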
static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev)
{
	struct plx_dma_desc *desc;
	int i;

	plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT,
				    sizeof(*plxdev->desc_ring), GFP_KERNEL);
	if (!plxdev->desc_ring)
		return -ENOMEM;

	for (i = 0; i < PLX_DMA_RING_COUNT; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			goto free_and_exit;

		dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan);
		desc->txd.tx_submit = plx_dma_tx_submit;
		desc->hw = &plxdev->hw_ring[i];

		plxdev->desc_ring[i] = desc;
	}

	return 0;

free_and_exit:
	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);
	kfree(plxdev->desc_ring);
	return -ENOMEM;
}

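/*
 * Bring the channel up: allocate the coherent hardware ring plus its
 * shadow descriptors, program the ring address and count registers,
 * and mark the ring active.  Returns the descriptor count on success.
 */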
static int plx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	int rc;

	plxdev->head = plxdev->tail = 0;
	plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz,
					     &plxdev->hw_ring_dma, GFP_KERNEL);
	if (!plxdev->hw_ring)
		return -ENOMEM;

	rc = plx_dma_alloc_desc(plxdev);
	if (rc)
		goto out_free_hw_ring;

	rcu_read_lock();
	if (!rcu_dereference(plxdev->pdev)) {
		rcu_read_unlock();
		rc = -ENODEV;
		goto out_free_hw_ring;
	}

	writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL);
	writel(lower_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_ADDR);
	writel(upper_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_ADDR_HI);
	writel(lower_32_bits(plxdev->hw_ring_dma),
	       plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR);
	writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT);
	writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT);

	plxdev->ring_active = true;

	rcu_read_unlock();

	return PLX_DMA_RING_COUNT;

out_free_hw_ring:
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
	return rc;
}

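/*
 * Tear the channel down in reverse: deactivate the ring, pause the
 * engine, synchronize against a still-running interrupt handler and
 * tasklet, abort whatever is left on the ring, then free the memory.
 */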
static void plx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan);
	size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring);
	struct pci_dev *pdev;
	int irq = -1;
	int i;

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	plx_dma_stop(plxdev);

	rcu_read_lock();
	pdev = rcu_dereference(plxdev->pdev);
	if (pdev)
		irq = pci_irq_vector(pdev, 0);
	rcu_read_unlock();

	if (irq > 0)
		synchronize_irq(irq);

	tasklet_kill(&plxdev->desc_task);

	plx_dma_abort_desc(plxdev);

	for (i = 0; i < PLX_DMA_RING_COUNT; i++)
		kfree(plxdev->desc_ring[i]);

	kfree(plxdev->desc_ring);
	dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring,
			  plxdev->hw_ring_dma);
}

static void plx_dma_release(struct dma_device *dma_dev)
{
	struct plx_dma_dev *plxdev =
		container_of(dma_dev, struct plx_dma_dev, dma_dev);

	put_device(dma_dev->dev);
	kfree(plxdev);
}

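/*
 * Build the dmaengine provider: a single DMA_MEMCPY-capable channel
 * backed by the register block in BAR 0, with completions signalled
 * through the device's first IRQ vector.
 */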
static int plx_dma_create(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev;
	struct dma_device *dma;
	struct dma_chan *chan;
	int rc;

	plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL);
	if (!plxdev)
		return -ENOMEM;

	rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0,
			 KBUILD_MODNAME, plxdev);
	if (rc)
		goto free_plx;

	spin_lock_init(&plxdev->ring_lock);
	tasklet_setup(&plxdev->desc_task, plx_dma_desc_task);

	RCU_INIT_POINTER(plxdev->pdev, pdev);
	plxdev->bar = pcim_iomap_table(pdev)[0];

	dma = &plxdev->dma_dev;
	dma->chancnt = 1;
	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->copy_align = DMAENGINE_ALIGN_1_BYTE;
	dma->dev = get_device(&pdev->dev);

	dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources;
	dma->device_free_chan_resources = plx_dma_free_chan_resources;
	dma->device_prep_dma_memcpy = plx_dma_prep_memcpy;
	dma->device_issue_pending = plx_dma_issue_pending;
	dma->device_tx_status = plx_dma_tx_status;
	dma->device_release = plx_dma_release;

	chan = &plxdev->dma_chan;
	chan->device = dma;
	dma_cookie_init(chan);
	list_add_tail(&chan->device_node, &dma->channels);

	rc = dma_async_device_register(dma);
	if (rc) {
		pci_err(pdev, "Failed to register dma device: %d\n", rc);
		goto put_device;
	}

	pci_set_drvdata(pdev, plxdev);

	return 0;

put_device:
	put_device(&pdev->dev);
	free_irq(pci_irq_vector(pdev, 0), plxdev);
free_plx:
	kfree(plxdev);

	return rc;
}

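/*
 * Managed PCI bring-up.  The 48-bit DMA mask (with a 32-bit fallback)
 * mirrors the descriptor format, which carries 32 low plus 16 high
 * address bits per buffer.
 */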
static int plx_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (rc)
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (rc)
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1, KBUILD_MODNAME);
	if (rc)
		return rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
	if (rc <= 0)
		return rc;

	pci_set_master(pdev);

	rc = plx_dma_create(pdev);
	if (rc)
		goto err_free_irq_vectors;

	pci_info(pdev, "PLX DMA Channel Registered\n");

	return 0;

err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
	return rc;
}

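/*
 * Device removal: publish the NULL pdev and wait out an RCU grace
 * period so no reader can still reach the hardware, then stop the
 * engine, fail outstanding descriptors and unregister.  The final
 * kfree() happens in plx_dma_release() once the dmaengine core drops
 * its last reference.
 */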
static void plx_dma_remove(struct pci_dev *pdev)
{
	struct plx_dma_dev *plxdev = pci_get_drvdata(pdev);

	free_irq(pci_irq_vector(pdev, 0), plxdev);

	rcu_assign_pointer(plxdev->pdev, NULL);
	synchronize_rcu();

	spin_lock_bh(&plxdev->ring_lock);
	plxdev->ring_active = false;
	spin_unlock_bh(&plxdev->ring_lock);

	__plx_dma_stop(plxdev);
	plx_dma_abort_desc(plxdev);

	plxdev->bar = NULL;
	dma_async_device_unregister(&plxdev->dma_dev);

	pci_free_irq_vectors(pdev);
}

static const struct pci_device_id plx_dma_pci_tbl[] = {
	{
		.vendor		= PCI_VENDOR_ID_PLX,
		.device		= 0x87D0,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
		.class		= PCI_CLASS_SYSTEM_OTHER << 8,
		.class_mask	= 0xFFFFFFFF,
	},
	{0}
};
MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl);

static struct pci_driver plx_dma_pci_driver = {
	.name           = KBUILD_MODNAME,
	.id_table       = plx_dma_pci_tbl,
	.probe          = plx_dma_probe,
	.remove		= plx_dma_remove,
};
module_pci_driver(plx_dma_pci_driver);
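
/*
 * Usage sketch (editor's illustration, not part of the driver): a
 * kernel client reaches this engine through the generic dmaengine API
 * rather than calling anything here directly.  dst_dma, src_dma and
 * len are assumed to be DMA-mapped addresses and a size the caller
 * already owns.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	... wait for the callback, or poll dmaengine_tx_status() ...
 *	dma_release_channel(chan);
 */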