Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN		U32_MAX
#define MSGDMA_DESC_NUM			1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *		   bit 23:16 read burst
 *		   bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *	    bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS	BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s)
 */
#define MSGDMA_DESC_CTL_GO		BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
					 MSGDMA_DESC_CTL_END_ON_LEN |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_EARLY_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD		0x00000001
#define MSGDMA_DESC_STRIDE_WR		0x00010000
#define MSGDMA_DESC_STRIDE_RW		0x00010001
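
/*
 * Note on the encoding above: per the struct msgdma_extended_desc layout,
 * the write stride lives in bits 31:16 and the read stride in bits 15:00,
 * so these constants enable address incrementing on the read side, the
 * write side, or both. The prep routines below use RW for memcpy and
 * RD/WR for the memory side of MEM_TO_DEV/DEV_TO_MEM slave transfers.
 */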

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS		0x00	/* Read / Clear */
#define MSGDMA_CSR_CONTROL		0x04	/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL	0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write seq number */
						/* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY			BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4)
#define MSGDMA_CSR_STAT_STOPPED			BIT(5)
#define MSGDMA_CSR_STAT_RESETTING		BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8)
#define MSGDMA_CSR_STAT_IRQ			BIT(9)
#define MSGDMA_CSR_STAT_MASK			GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	GENMASK(8, 0)

#define DESC_EMPTY	(MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
			 MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP			BIT(0)
#define MSGDMA_CSR_CTL_RESET			BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR		BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY		BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR		BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS		BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)		(((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED	0x00
#define MSGDMA_RESP_STATUS		0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM	BIT(8)
#define MSGDMA_RESP_ERR_MASK	0xff

/**
 * struct msgdma_sw_desc - implements a software descriptor
 * @async_tx: support for the async_tx API
 * @hw_desc: associated HW descriptor
 * @node: node to move from the free list to the tx list
 * @tx_list: transmit list node
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/*
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
	spinlock_t lock;		/* protects the descriptor lists below */
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;	/* submitted, not yet pushed to HW */
	struct list_head free_list;	/* unused sw descriptors */
	struct list_head active_list;	/* descriptors in flight */
	struct list_head done_list;	/* completed, callback pending */
	u32 desc_free_cnt;		/* number of entries on free_list */
	bool idle;

	struct dma_device dmadev;
	struct dma_chan	dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller */
	void __iomem *csr;

	/* mSGDMA descriptors */
	void __iomem *desc;

	/* mSGDMA response */
	void __iomem *resp;
};

#define to_mdev(chan)	container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)

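/*
 * The controller exposes a single DMA channel, so the dma_chan and
 * dma_device are embedded directly in struct msgdma_device and the
 * container_of() helpers above recover the device from a channel or
 * transaction pointer.
 */
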
/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&mdev->lock, flags);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/**
 * msgdma_free_descriptor - Return a descriptor and its children to the free list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_add_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/**
 * msgdma_free_desc_list - Free descriptors list
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List of descriptors to parse and free
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32bits of src & dst addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32bits of src & dst addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;	/* 0 will result in max burst length */

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
		MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as the end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;
	unsigned long flags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&mdev->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_irqrestore(&mdev->lock, flags);

	return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}

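/*
 * A minimal sketch of how a dmaengine client would exercise the memcpy
 * path above (hypothetical consumer code, error handling omitted; the
 * channel name "dma0" is an assumption):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "dma0");
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);		// ends up in msgdma_tx_submit()
 *	dma_async_issue_pending(chan);	// ends up in msgdma_issue_pending()
 */
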
/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: Memory-side scatter list (source for MEM_TO_DEV, destination
 *	 for DEV_TO_MEM)
 * @sg_len: Number of entries in the scatter list
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;
	u32 stride;
	unsigned long irqflags;

	for_each_sg(sgl, sg, sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatterlist entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}

static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}

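/*
 * A minimal sketch of a MEM_TO_DEV slave setup against the config hook
 * above (hypothetical consumer code; fifo_phys_addr is an assumed
 * device-side FIFO address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	// calls msgdma_dma_config()
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 */
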
static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check that the DESC FIFO is not full. If it is full, we need to
	 * wait for at least one entry to become free again
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor needs to get copied into the descriptor FIFO
	 * of the DMA controller. The descriptor will get flushed to the
	 * FIFO once the last word (the control word) is written. Since we
	 * are not 100% sure that memcpy() writes all words in the "correct"
	 * order (addresses from low to high) on all architectures, we make
	 * sure this control word is written last by writing it separately
	 * and adding some write barriers here.
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_start_transfer(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		list_del(&desc->node);

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			spin_unlock(&mdev->lock);
			callback(callback_param);
			spin_lock(&mdev->lock);
		}

		/* Run any dependencies, then free the descriptor */
		msgdma_free_descriptor(mdev, desc);
	}
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

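/*
 * Note: responses are consumed strictly in submission order, so the head
 * of the active list is assumed to be the descriptor the current response
 * belongs to; the hardware is expected to complete descriptors in order.
 */
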
/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_free_descriptors(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
	kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success, negative errno on failure
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}

/**
 * msgdma_tasklet - Completion tasklet handler
 * @t: Pointer to the tasklet embedded in the Altera mSGDMA device structure
 */
static void msgdma_tasklet(struct tasklet_struct *t)
{
	struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);

	/* Read number of responses that are available */
	count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
	dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
		__func__, __LINE__, count);

	while (count--) {
		/*
		 * Read both longwords to purge this response from the FIFO.
		 * On Avalon-MM implementations, size and status do not
		 * carry any real values, like transferred bytes or error
		 * bits, so we simply drop these values.
		 */
		size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
		status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

		msgdma_complete_descriptor(mdev);
		msgdma_chan_desc_cleanup(mdev);
	}

	spin_unlock_irqrestore(&mdev->lock, flags);
}

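/*
 * Completion flow: the hard IRQ handler below acknowledges the interrupt
 * and schedules the tasklet above; the tasklet pops one response per
 * completed descriptor from the response FIFO, marks the matching cookie
 * complete and runs client callbacks with the channel lock dropped.
 */
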
/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer if the DMA controller is idle */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) static int request_and_map(struct platform_device *pdev, const char *name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) 			   struct resource **res, void __iomem **ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) 	struct resource *region;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) 	struct device *device = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) 	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) 	if (*res == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) 		dev_err(device, "resource %s not defined\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) 	region = devm_request_mem_region(device, (*res)->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) 					 resource_size(*res), dev_name(device));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) 	if (region == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) 		dev_err(device, "unable to request %s\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) 		return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 	*ptr = devm_ioremap(device, region->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) 			    resource_size(region));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) 	if (*ptr == NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) 		dev_err(device, "ioremap of %s failed\n", name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) }
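
/*
 * Illustrative sketch only: request_and_map() looks the resources up by
 * name, so a platform must provide MEM resources named "csr", "desc" and
 * "resp" plus one interrupt for this driver to bind.  A hypothetical
 * board-file registration (base addresses, sizes and IRQ number below
 * are made up) could look like:
 *
 *	static struct resource example_msgdma_res[] = {
 *		DEFINE_RES_MEM_NAMED(0xff200000, 0x20, "csr"),
 *		DEFINE_RES_MEM_NAMED(0xff200020, 0x10, "desc"),
 *		DEFINE_RES_MEM_NAMED(0xff200030, 0x08, "resp"),
 *		DEFINE_RES_IRQ(32),
 *	};
 *
 *	platform_device_register_simple("altera-msgdma", -1,
 *					example_msgdma_res,
 *					ARRAY_SIZE(example_msgdma_res));
 */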
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788)  * msgdma_probe - Driver probe function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789)  * @pdev: Pointer to the platform_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)  * Return: '0' on success and a negative error code on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) static int msgdma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	struct msgdma_device *mdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 	struct dma_device *dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 	struct resource *dma_res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	if (!mdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	mdev->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) 	/* Map CSR space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) 	/* Map (extended) descriptor space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 	/* Map response space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) 	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) 	platform_set_drvdata(pdev, mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) 	/* Get interrupt nr from platform data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) 	mdev->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) 	if (mdev->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 		return mdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 			       0, dev_name(&pdev->dev), mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) 	tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 	dma_cookie_init(&mdev->dmachan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	spin_lock_init(&mdev->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 
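	/* Descriptors cycle through free -> pending -> active -> done and back to free */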
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 	INIT_LIST_HEAD(&mdev->active_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 	INIT_LIST_HEAD(&mdev->pending_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	INIT_LIST_HEAD(&mdev->done_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 	INIT_LIST_HEAD(&mdev->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 	dma_dev = &mdev->dmadev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 	/* Set DMA capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	dma_cap_zero(dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 		BIT(DMA_MEM_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	/* Init DMA link list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	INIT_LIST_HEAD(&dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	/* Set base routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	dma_dev->device_tx_status = dma_cookie_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 	dma_dev->device_issue_pending = msgdma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 	dma_dev->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 	dma_dev->device_config = msgdma_dma_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 	mdev->dmachan.device = dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 	/* Prefer a 64-bit DMA mask (the extended descriptors carry high address bits); fall back to 32 bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 		dev_warn(&pdev->dev, "unable to set 64-bit DMA mask, trying 32 bits\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 	msgdma_reset(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 	ret = dma_async_device_register(dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 	dev_notice(&pdev->dev, "Altera mSGDMA driver probed successfully\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 	msgdma_dev_remove(mdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
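
/*
 * Usage sketch (illustrative only; error handling and unmapping omitted):
 * once probed, the channel is driven through the generic dmaengine API.
 * dst_dma, src_dma and len are assumed to be a dma_addr_t pair obtained
 * with dma_map_single() and the transfer size in bytes:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *chan;
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);	// any memcpy-capable channel
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */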
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)  * msgdma_remove - Driver remove function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903)  * @pdev: Pointer to the platform_device structure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)  * Return: Always '0'
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) static int msgdma_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 	struct msgdma_device *mdev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 	dma_async_device_unregister(&mdev->dmadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 	msgdma_dev_remove(mdev);
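	/* Remaining devm-managed resources are released by the driver core */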
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) static struct platform_driver msgdma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 		.name = "altera-msgdma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 	.probe = msgdma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 	.remove = msgdma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 
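/* Expands to the module_init()/module_exit() boilerplate that registers msgdma_driver */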
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) module_platform_driver(msgdma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) MODULE_ALIAS("platform:altera-msgdma");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) MODULE_DESCRIPTION("Altera mSGDMA driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) MODULE_LICENSE("GPL");