Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Applied Micro X-Gene SoC DMA engine Driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5)  * Copyright (c) 2015, Applied Micro Circuits Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * Authors: Rameshwar Prasad Sahu <rsahu@apm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *	    Loc Ho <lho@apm.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * NOTE: PM support is currently not available.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12) #include <linux/acpi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14) #include <linux/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17) #include <linux/dmapool.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20) #include <linux/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24) #include "dmaengine.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) /* X-Gene DMA ring csr registers and bit definitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #define XGENE_DMA_RING_CONFIG			0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #define XGENE_DMA_RING_ENABLE			BIT(31)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #define XGENE_DMA_RING_ID			0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #define XGENE_DMA_RING_ID_SETUP(v)		((v) | BIT(31))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #define XGENE_DMA_RING_ID_BUF			0x0C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #define XGENE_DMA_RING_ID_BUF_SETUP(v)		(((v) << 9) | BIT(21))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #define XGENE_DMA_RING_THRESLD0_SET1		0x30
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #define XGENE_DMA_RING_THRESLD0_SET1_VAL	0x64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #define XGENE_DMA_RING_THRESLD1_SET1		0x34
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #define XGENE_DMA_RING_THRESLD1_SET1_VAL	0xC8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #define XGENE_DMA_RING_HYSTERESIS		0x68
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #define XGENE_DMA_RING_HYSTERESIS_VAL		0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #define XGENE_DMA_RING_STATE			0x6C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #define XGENE_DMA_RING_STATE_WR_BASE		0x70
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #define XGENE_DMA_RING_NE_INT_MODE		0x017C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #define XGENE_DMA_RING_NE_INT_MODE_SET(m, v)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) 	((m) = ((m) & ~BIT(31 - (v))) | BIT(31 - (v)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) #define XGENE_DMA_RING_NE_INT_MODE_RESET(m, v)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) 	((m) &= (~BIT(31 - (v))))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #define XGENE_DMA_RING_CLKEN			0xC208
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) #define XGENE_DMA_RING_SRST			0xC200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   48) #define XGENE_DMA_RING_MEM_RAM_SHUTDOWN		0xD070
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   49) #define XGENE_DMA_RING_BLK_MEM_RDY		0xD074
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   50) #define XGENE_DMA_RING_BLK_MEM_RDY_VAL		0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   51) #define XGENE_DMA_RING_ID_GET(owner, num)	(((owner) << 6) | (num))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   52) #define XGENE_DMA_RING_DST_ID(v)		((1 << 10) | (v))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) #define XGENE_DMA_RING_CMD_OFFSET		0x2C
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   54) #define XGENE_DMA_RING_CMD_BASE_OFFSET(v)	((v) << 6)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   55) #define XGENE_DMA_RING_COHERENT_SET(m)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   56) 	(((u32 *)(m))[2] |= BIT(4))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   57) #define XGENE_DMA_RING_ADDRL_SET(m, v)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   58) 	(((u32 *)(m))[2] |= (((v) >> 8) << 5))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   59) #define XGENE_DMA_RING_ADDRH_SET(m, v)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   60) 	(((u32 *)(m))[3] |= ((v) >> 35))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) #define XGENE_DMA_RING_ACCEPTLERR_SET(m)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   62) 	(((u32 *)(m))[3] |= BIT(19))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   63) #define XGENE_DMA_RING_SIZE_SET(m, v)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   64) 	(((u32 *)(m))[3] |= ((v) << 23))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   65) #define XGENE_DMA_RING_RECOMBBUF_SET(m)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   66) 	(((u32 *)(m))[3] |= BIT(27))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   67) #define XGENE_DMA_RING_RECOMTIMEOUTL_SET(m)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   68) 	(((u32 *)(m))[3] |= (0x7 << 28))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   69) #define XGENE_DMA_RING_RECOMTIMEOUTH_SET(m)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   70) 	(((u32 *)(m))[4] |= 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   71) #define XGENE_DMA_RING_SELTHRSH_SET(m)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   72) 	(((u32 *)(m))[4] |= BIT(3))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   73) #define XGENE_DMA_RING_TYPE_SET(m, v)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   74) 	(((u32 *)(m))[4] |= ((v) << 19))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   75) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   76) /* X-Gene DMA device csr registers and bit definitions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   77) #define XGENE_DMA_IPBRR				0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   78) #define XGENE_DMA_DEV_ID_RD(v)			((v) & 0x00000FFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) #define XGENE_DMA_BUS_ID_RD(v)			(((v) >> 12) & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   80) #define XGENE_DMA_REV_NO_RD(v)			(((v) >> 14) & 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   81) #define XGENE_DMA_GCR				0x10
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   82) #define XGENE_DMA_CH_SETUP(v)			\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   83) 	((v) = ((v) & ~0x000FFFFF) | 0x000AAFFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   84) #define XGENE_DMA_ENABLE(v)			((v) |= BIT(31))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   85) #define XGENE_DMA_DISABLE(v)			((v) &= ~BIT(31))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   86) #define XGENE_DMA_RAID6_CONT			0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   87) #define XGENE_DMA_RAID6_MULTI_CTRL(v)		((v) << 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   88) #define XGENE_DMA_INT				0x70
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   89) #define XGENE_DMA_INT_MASK			0x74
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   90) #define XGENE_DMA_INT_ALL_MASK			0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   91) #define XGENE_DMA_INT_ALL_UNMASK		0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   92) #define XGENE_DMA_INT_MASK_SHIFT		0x14
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   93) #define XGENE_DMA_RING_INT0_MASK		0x90A0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   94) #define XGENE_DMA_RING_INT1_MASK		0x90A8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   95) #define XGENE_DMA_RING_INT2_MASK		0x90B0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   96) #define XGENE_DMA_RING_INT3_MASK		0x90B8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   97) #define XGENE_DMA_RING_INT4_MASK		0x90C0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   98) #define XGENE_DMA_CFG_RING_WQ_ASSOC		0x90E0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   99) #define XGENE_DMA_ASSOC_RING_MNGR1		0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  100) #define XGENE_DMA_MEM_RAM_SHUTDOWN		0xD070
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  101) #define XGENE_DMA_BLK_MEM_RDY			0xD074
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  102) #define XGENE_DMA_BLK_MEM_RDY_VAL		0xFFFFFFFF
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  103) #define XGENE_DMA_RING_CMD_SM_OFFSET		0x8000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  105) /* X-Gene SoC EFUSE csr register and bit definition */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  106) #define XGENE_SOC_JTAG1_SHADOW			0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  107) #define XGENE_DMA_PQ_DISABLE_MASK		BIT(13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  109) /* X-Gene DMA Descriptor format */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  110) #define XGENE_DMA_DESC_NV_BIT			BIT_ULL(50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  111) #define XGENE_DMA_DESC_IN_BIT			BIT_ULL(55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  112) #define XGENE_DMA_DESC_C_BIT			BIT_ULL(63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) #define XGENE_DMA_DESC_DR_BIT			BIT_ULL(61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  114) #define XGENE_DMA_DESC_ELERR_POS		46
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  115) #define XGENE_DMA_DESC_RTYPE_POS		56
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  116) #define XGENE_DMA_DESC_LERR_POS			60
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  117) #define XGENE_DMA_DESC_BUFLEN_POS		48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  118) #define XGENE_DMA_DESC_HOENQ_NUM_POS		48
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  119) #define XGENE_DMA_DESC_ELERR_RD(m)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  120) 	(((m) >> XGENE_DMA_DESC_ELERR_POS) & 0x3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  121) #define XGENE_DMA_DESC_LERR_RD(m)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  122) 	(((m) >> XGENE_DMA_DESC_LERR_POS) & 0x7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  123) #define XGENE_DMA_DESC_STATUS(elerr, lerr)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  124) 	(((elerr) << 4) | (lerr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  125) 
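/*
 * Illustrative sketch (not part of the original driver): how the error
 * status of a completed transaction is read back from the m0 word of the
 * returned hw descriptor, assuming m0 holds the completion message:
 *
 *	u64 m0 = le64_to_cpu(desc_hw->m0);
 *	u8 status = XGENE_DMA_DESC_STATUS(XGENE_DMA_DESC_ELERR_RD(m0),
 *					  XGENE_DMA_DESC_LERR_RD(m0));
 *
 * A zero status means success; a non-zero value indexes the
 * xgene_dma_desc_err[] message table defined further below.
 */
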
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  126) /* X-Gene DMA descriptor empty s/w signature */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  127) #define XGENE_DMA_DESC_EMPTY_SIGNATURE		~0ULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  129) /* X-Gene DMA configurable parameters defines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  130) #define XGENE_DMA_RING_NUM		512
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  131) #define XGENE_DMA_BUFNUM		0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  132) #define XGENE_DMA_CPU_BUFNUM		0x18
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  133) #define XGENE_DMA_RING_OWNER_DMA	0x03
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  134) #define XGENE_DMA_RING_OWNER_CPU	0x0F
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  135) #define XGENE_DMA_RING_TYPE_REGULAR	0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  136) #define XGENE_DMA_RING_WQ_DESC_SIZE	32	/* 32 Bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  137) #define XGENE_DMA_RING_NUM_CONFIG	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  138) #define XGENE_DMA_MAX_CHANNEL		4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  139) #define XGENE_DMA_XOR_CHANNEL		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  140) #define XGENE_DMA_PQ_CHANNEL		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  141) #define XGENE_DMA_MAX_BYTE_CNT		0x4000	/* 16 KB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) #define XGENE_DMA_MAX_64B_DESC_BYTE_CNT	0x14000	/* 80 KB */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  143) #define XGENE_DMA_MAX_XOR_SRC		5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  144) #define XGENE_DMA_16K_BUFFER_LEN_CODE	0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  145) #define XGENE_DMA_INVALID_LEN_CODE	0x7800000000000000ULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  147) /* X-Gene DMA descriptor error codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  148) #define ERR_DESC_AXI			0x01
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  149) #define ERR_BAD_DESC			0x02
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  150) #define ERR_READ_DATA_AXI		0x03
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  151) #define ERR_WRITE_DATA_AXI		0x04
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  152) #define ERR_FBP_TIMEOUT			0x05
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  153) #define ERR_ECC				0x06
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  154) #define ERR_DIFF_SIZE			0x08
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  155) #define ERR_SCT_GAT_LEN			0x09
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  156) #define ERR_CRC_ERR			0x11
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  157) #define ERR_CHKSUM			0x12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  158) #define ERR_DIF				0x13
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  160) /* X-Gene DMA error interrupt codes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  161) #define ERR_DIF_SIZE_INT		0x0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  162) #define ERR_GS_ERR_INT			0x1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  163) #define ERR_FPB_TIMEO_INT		0x2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  164) #define ERR_WFIFO_OVF_INT		0x3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  165) #define ERR_RFIFO_OVF_INT		0x4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  166) #define ERR_WR_TIMEO_INT		0x5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  167) #define ERR_RD_TIMEO_INT		0x6
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  168) #define ERR_WR_ERR_INT			0x7
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  169) #define ERR_RD_ERR_INT			0x8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  170) #define ERR_BAD_DESC_INT		0x9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) #define ERR_DESC_DST_INT		0xA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  172) #define ERR_DESC_SRC_INT		0xB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  174) /* X-Gene DMA flyby operation code */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  175) #define FLYBY_2SRC_XOR			0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  176) #define FLYBY_3SRC_XOR			0x90
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  177) #define FLYBY_4SRC_XOR			0xA0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  178) #define FLYBY_5SRC_XOR			0xB0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  179) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  180) /* X-Gene DMA SW descriptor flags */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  181) #define XGENE_DMA_FLAG_64B_DESC		BIT(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  183) /* Define to dump X-Gene DMA descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  184) #define XGENE_DMA_DESC_DUMP(desc, m)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  185) 	print_hex_dump(KERN_ERR, (m),	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  186) 			DUMP_PREFIX_ADDRESS, 16, 8, (desc), 32, 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  187) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  188) #define to_dma_desc_sw(tx)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  189) 	container_of(tx, struct xgene_dma_desc_sw, tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  190) #define to_dma_chan(dchan)		\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  191) 	container_of(dchan, struct xgene_dma_chan, dma_chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  193) #define chan_dbg(chan, fmt, arg...)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  194) 	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  195) #define chan_err(chan, fmt, arg...)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  196) 	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  198) struct xgene_dma_desc_hw {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  199) 	__le64 m0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 	__le64 m1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  201) 	__le64 m2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  202) 	__le64 m3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  203) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  204) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  205) enum xgene_dma_ring_cfgsize {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  206) 	XGENE_DMA_RING_CFG_SIZE_512B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  207) 	XGENE_DMA_RING_CFG_SIZE_2KB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  208) 	XGENE_DMA_RING_CFG_SIZE_16KB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  209) 	XGENE_DMA_RING_CFG_SIZE_64KB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  210) 	XGENE_DMA_RING_CFG_SIZE_512KB,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  211) 	XGENE_DMA_RING_CFG_SIZE_INVALID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  212) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  214) struct xgene_dma_ring {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  215) 	struct xgene_dma *pdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  216) 	u8 buf_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  217) 	u16 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  218) 	u16 num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  219) 	u16 head;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  220) 	u16 owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  221) 	u16 slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  222) 	u16 dst_ring_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  223) 	u32 size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  224) 	void __iomem *cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  225) 	void __iomem *cmd_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  226) 	dma_addr_t desc_paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  227) 	u32 state[XGENE_DMA_RING_NUM_CONFIG];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  228) 	enum xgene_dma_ring_cfgsize cfgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  229) 	union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  230) 		void *desc_vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  231) 		struct xgene_dma_desc_hw *desc_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  232) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  233) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  235) struct xgene_dma_desc_sw {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  236) 	struct xgene_dma_desc_hw desc1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  237) 	struct xgene_dma_desc_hw desc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  238) 	u32 flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  239) 	struct list_head node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  240) 	struct list_head tx_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  241) 	struct dma_async_tx_descriptor tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  242) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  244) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  245)  * struct xgene_dma_chan - internal representation of an X-Gene DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  246)  * @dma_chan: dmaengine channel object member
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  247)  * @pdma: X-Gene DMA device structure reference
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  248)  * @dev: struct device reference for dma mapping api
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  249)  * @id: raw id of this channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  250)  * @rx_irq: channel IRQ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  251)  * @name: name of X-Gene DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  252)  * @lock: serializes enqueue/dequeue operations to the descriptor pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  253)  * @pending: number of transaction requests pushed to the DMA controller for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  254)  *	execution but still waiting for completion
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  255)  * @max_outstanding: max number of outstanding requests we can push to the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  256)  * @ld_pending: descriptors which are queued to run, but have not yet been
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  257)  *	submitted to the hardware for execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  258)  * @ld_running: descriptors which are currently being executed by the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  259)  * @ld_completed: descriptors which have finished execution by the hardware.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  260)  *	These descriptors have already had their cleanup actions run. They
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  261)  *	are waiting for the ACK bit to be set by the async tx API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  262)  * @desc_pool: descriptor pool for DMA operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  263)  * @tasklet: bottom half where all completed descriptors are cleaned up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  264)  * @tx_ring: transmit ring descriptor that we use to prepare actual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  265)  *	descriptors for execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  266)  * @rx_ring: receive ring descriptor that we use to get completed DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  267)  *	descriptors during cleanup time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  268)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  269) struct xgene_dma_chan {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  270) 	struct dma_chan dma_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  271) 	struct xgene_dma *pdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  272) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  273) 	int id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  274) 	int rx_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  275) 	char name[10];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  276) 	spinlock_t lock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  277) 	int pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  278) 	int max_outstanding;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  279) 	struct list_head ld_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  280) 	struct list_head ld_running;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  281) 	struct list_head ld_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  282) 	struct dma_pool *desc_pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  283) 	struct tasklet_struct tasklet;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  284) 	struct xgene_dma_ring tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  285) 	struct xgene_dma_ring rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  286) };
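
/*
 * Lifecycle sketch (illustrative, not part of the original driver): a sw
 * descriptor moves ld_pending -> ld_running -> ld_completed. From a
 * dmaengine client's point of view, assuming 'dchan' is one of this
 * driver's XOR-capable channels:
 *
 *	tx = dchan->device->device_prep_dma_xor(dchan, dst, srcs, src_cnt,
 *						len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		- descriptor lands on ld_pending
 *	dma_async_issue_pending(dchan);		- pushed to hw, moved to ld_running
 *
 * Once the hw signals completion, the tasklet runs the callbacks and the
 * descriptor is parked on ld_completed until the async_tx 'ack' bit is
 * set, after which it is returned to the dma pool.
 */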
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  288) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  289)  * struct xgene_dma - internal representation of an X-Gene DMA device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  290)  * @dev: reference to this device's struct device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  291)  * @clk: reference to this device's clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  292)  * @err_irq: DMA error irq number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  293)  * @ring_num: start id number for DMA ring
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  294)  * @csr_dma: base for DMA register access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  295)  * @csr_ring: base for DMA ring register access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  296)  * @csr_ring_cmd: base for DMA ring command register access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  297)  * @csr_efuse: base for efuse register access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  298)  * @dma_dev: embedded struct dma_device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  299)  * @chan: reference to X-Gene DMA channels
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  300)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  301) struct xgene_dma {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  302) 	struct device *dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  303) 	struct clk *clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  304) 	int err_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  305) 	int ring_num;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  306) 	void __iomem *csr_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  307) 	void __iomem *csr_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  308) 	void __iomem *csr_ring_cmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  309) 	void __iomem *csr_efuse;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  310) 	struct dma_device dma_dev[XGENE_DMA_MAX_CHANNEL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  311) 	struct xgene_dma_chan chan[XGENE_DMA_MAX_CHANNEL];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  312) };
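
/*
 * Indexing sketch (illustrative): channels are addressed by the fixed
 * indices defined above, e.g.
 *
 *	struct xgene_dma_chan *xor_chan = &pdma->chan[XGENE_DMA_XOR_CHANNEL];
 *	struct xgene_dma_chan *pq_chan  = &pdma->chan[XGENE_DMA_PQ_CHANNEL];
 *
 * The capabilities advertised for the PQ channel additionally depend on
 * the efuse check is_pq_enabled() defined below.
 */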
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  313) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  314) static const char * const xgene_dma_desc_err[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  315) 	[ERR_DESC_AXI] = "AXI error when reading src/dst link list",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  316) 	[ERR_BAD_DESC] = "ERR or El_ERR fields not set to zero in desc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  317) 	[ERR_READ_DATA_AXI] = "AXI error when reading data",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  318) 	[ERR_WRITE_DATA_AXI] = "AXI error when writing data",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  319) 	[ERR_FBP_TIMEOUT] = "Timeout on bufpool fetch",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  320) 	[ERR_ECC] = "ECC double bit error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  321) 	[ERR_DIFF_SIZE] = "Bufpool too small to hold all the DIF result",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  322) 	[ERR_SCT_GAT_LEN] = "Gather and scatter data length not same",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  323) 	[ERR_CRC_ERR] = "CRC error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  324) 	[ERR_CHKSUM] = "Checksum error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  325) 	[ERR_DIF] = "DIF error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  326) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  328) static const char * const xgene_dma_err[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  329) 	[ERR_DIF_SIZE_INT] = "DIF size error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  330) 	[ERR_GS_ERR_INT] = "Gather scatter not same size error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  331) 	[ERR_FPB_TIMEO_INT] = "Free pool time out error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  332) 	[ERR_WFIFO_OVF_INT] = "Write FIFO over flow error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  333) 	[ERR_RFIFO_OVF_INT] = "Read FIFO over flow error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  334) 	[ERR_WR_TIMEO_INT] = "Write time out error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  335) 	[ERR_RD_TIMEO_INT] = "Read time out error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  336) 	[ERR_WR_ERR_INT] = "HBF bus write error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 	[ERR_RD_ERR_INT] = "HBF bus read error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  338) 	[ERR_BAD_DESC_INT] = "Ring descriptor HE0 not set error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  339) 	[ERR_DESC_DST_INT] = "HFB reading dst link address error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  340) 	[ERR_DESC_SRC_INT] = "HFB reading src link address error",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  341) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  343) static bool is_pq_enabled(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  344) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  345) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  347) 	val = ioread32(pdma->csr_efuse + XGENE_SOC_JTAG1_SHADOW);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  348) 	return !(val & XGENE_DMA_PQ_DISABLE_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  349) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  350) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  351) static u64 xgene_dma_encode_len(size_t len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  352) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  353) 	return (len < XGENE_DMA_MAX_BYTE_CNT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  354) 		((u64)len << XGENE_DMA_DESC_BUFLEN_POS) :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  355) 		XGENE_DMA_16K_BUFFER_LEN_CODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  356) }
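
/*
 * Worked examples (illustrative): with XGENE_DMA_MAX_BYTE_CNT = 0x4000,
 *
 *	xgene_dma_encode_len(0x1000) == (u64)0x1000 << 48	- explicit length
 *	xgene_dma_encode_len(0x4000) == 0x0			- 16 KB length code
 *
 * i.e. a buffer of exactly 16 KB is encoded with the special
 * XGENE_DMA_16K_BUFFER_LEN_CODE value rather than a literal byte count.
 */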
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  358) static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  359) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  360) 	static u8 flyby_type[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  361) 		FLYBY_2SRC_XOR, /* Dummy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  362) 		FLYBY_2SRC_XOR, /* Dummy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  363) 		FLYBY_2SRC_XOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  364) 		FLYBY_3SRC_XOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  365) 		FLYBY_4SRC_XOR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  366) 		FLYBY_5SRC_XOR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  367) 	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  369) 	return flyby_type[src_cnt];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  370) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  372) static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  373) 				     dma_addr_t *paddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) 	size_t nbytes = (*len < XGENE_DMA_MAX_BYTE_CNT) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) 			*len : XGENE_DMA_MAX_BYTE_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) 	*ext8 |= cpu_to_le64(*paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 	*ext8 |= cpu_to_le64(xgene_dma_encode_len(nbytes));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  380) 	*len -= nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  381) 	*paddr += nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  382) }
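
/*
 * Usage sketch (illustrative): callers walk a source buffer in chunks of
 * at most XGENE_DMA_MAX_BYTE_CNT (16 KB), one extended-8B slot per chunk.
 * 'total_len' and 'src_paddr' below are hypothetical:
 *
 *	size_t len = total_len;
 *	dma_addr_t src = src_paddr;
 *
 *	xgene_dma_set_src_buffer(&desc->m1, &len, &src);
 *
 * Each call encodes up to 16 KB into the given slot, then shrinks 'len'
 * and advances 'src' by the amount consumed.
 */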
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  384) static __le64 *xgene_dma_lookup_ext8(struct xgene_dma_desc_hw *desc, int idx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  386) 	switch (idx) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  387) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  388) 		return &desc->m1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  389) 	case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  390) 		return &desc->m0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  391) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  392) 		return &desc->m3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  393) 	case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  394) 		return &desc->m2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  395) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  396) 		pr_err("Invalid dma descriptor index\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  397) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  399) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402) static void xgene_dma_init_desc(struct xgene_dma_desc_hw *desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403) 				u16 dst_ring_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405) 	desc->m0 |= cpu_to_le64(XGENE_DMA_DESC_IN_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406) 	desc->m0 |= cpu_to_le64((u64)XGENE_DMA_RING_OWNER_DMA <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) 				XGENE_DMA_DESC_RTYPE_POS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 	desc->m1 |= cpu_to_le64(XGENE_DMA_DESC_C_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) 	desc->m3 |= cpu_to_le64((u64)dst_ring_num <<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 				XGENE_DMA_DESC_HOENQ_NUM_POS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) static void xgene_dma_prep_xor_desc(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 				    struct xgene_dma_desc_sw *desc_sw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 				    dma_addr_t *dst, dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) 				    u32 src_cnt, size_t *nbytes,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 				    const u8 *scf)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419) 	struct xgene_dma_desc_hw *desc1, *desc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420) 	size_t len = *nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423) 	desc1 = &desc_sw->desc1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) 	desc2 = &desc_sw->desc2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	/* Initialize DMA descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 	xgene_dma_init_desc(desc1, chan->tx_ring.dst_ring_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 	/* Set destination address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	desc1->m2 |= cpu_to_le64(XGENE_DMA_DESC_DR_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	desc1->m3 |= cpu_to_le64(*dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 	/* We have multiple source addresses, so we need to set the NV bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	desc1->m0 |= cpu_to_le64(XGENE_DMA_DESC_NV_BIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 	/* Set flyby opcode */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	desc1->m2 |= cpu_to_le64(xgene_dma_encode_xor_flyby(src_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) 	/* Set 1st to 5th source addresses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 	for (i = 0; i < src_cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) 		len = *nbytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442) 		xgene_dma_set_src_buffer((i == 0) ? &desc1->m1 :
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443) 					 xgene_dma_lookup_ext8(desc2, i - 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444) 					 &len, &src[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445) 		desc1->m2 |= cpu_to_le64((scf[i] << ((i + 1) * 8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) 	/* Update metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	*nbytes = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) 	*dst += XGENE_DMA_MAX_BYTE_CNT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	/* We always need a 64B descriptor to perform XOR or PQ operations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 	desc_sw->flags |= XGENE_DMA_FLAG_64B_DESC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) }
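
/*
 * Call sketch (illustrative, not from the original driver): preparing one
 * descriptor pair for a 3-source XOR. 's0'..'s2', 'd' and 'n' are
 * hypothetical, and plain XOR is assumed to use unit coefficients:
 *
 *	dma_addr_t src[3] = { s0, s1, s2 };
 *	dma_addr_t dst = d;
 *	u8 scf[3] = { 0x01, 0x01, 0x01 };
 *	size_t left = n;
 *
 *	xgene_dma_prep_xor_desc(chan, desc_sw, &dst, src, 3, &left, scf);
 *
 * On return 'left' has been reduced by at most 16 KB (the per-descriptor
 * limit) and 'dst' advanced by XGENE_DMA_MAX_BYTE_CNT, ready for the next
 * descriptor of the same request.
 */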
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	struct xgene_dma_desc_sw *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	struct xgene_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 	if (unlikely(!tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 	chan = to_dma_chan(tx->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	desc = to_dma_desc_sw(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 	spin_lock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	cookie = dma_cookie_assign(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	/* Add this transaction list onto the tail of the pending queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	spin_unlock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 	return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) static void xgene_dma_clean_descriptor(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 				       struct xgene_dma_desc_sw *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 	chan_dbg(chan, "LD %p free\n", desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 				 struct xgene_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 	struct xgene_dma_desc_sw *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) 	dma_addr_t phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494) 	desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495) 	if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496) 		chan_err(chan, "Failed to allocate LDs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	INIT_LIST_HEAD(&desc->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	desc->tx.phys = phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 	desc->tx.tx_submit = xgene_dma_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 	chan_dbg(chan, "LD %p allocated\n", desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) }
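
/*
 * Note (illustrative): descriptors allocated here come from the per-channel
 * dma pool and must eventually be released with
 *
 *	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
 *
 * which is what xgene_dma_clean_descriptor() above does once a descriptor
 * has completed and been acked.
 */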
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511)  * xgene_dma_clean_completed_descriptor - free all descriptors which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512)  * have been completed and acked
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513)  * @chan: X-Gene DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515)  * This function is used on all completed and acked descriptors.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) static void xgene_dma_clean_completed_descriptor(struct xgene_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) 	struct xgene_dma_desc_sw *desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 	/* Run the callback for each descriptor, in order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 	list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 		if (async_tx_test_ack(&desc->tx))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) 			xgene_dma_clean_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529)  * xgene_dma_run_tx_complete_actions - cleanup a single link descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530)  * @chan: X-Gene DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531)  * @desc: descriptor to cleanup and free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533)  * This function is used on a descriptor which has been executed by the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534)  * controller. It will run any callbacks, submit any dependencies.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) static void xgene_dma_run_tx_complete_actions(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 					      struct xgene_dma_desc_sw *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	struct dma_async_tx_descriptor *tx = &desc->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	 * If this is not the last transaction in the group, there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 	 * need to complete the cookie or run any callback, as this is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 	 * the tx descriptor that was handed back to the caller of this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	 * DMA request.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 	if (tx->cookie == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 	dma_cookie_complete(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	dma_descriptor_unmap(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	/* Run the link descriptor callback function */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 	dmaengine_desc_get_callback_invoke(tx, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 	/* Run any dependencies */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	dma_run_dependencies(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562)  * xgene_dma_clean_running_descriptor - move the completed descriptor from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563)  * ld_running to ld_completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564)  * @chan: X-Gene DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565)  * @desc: the descriptor which is completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567)  * Free the descriptor directly if acked by async_tx api,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568)  * else move it to queue ld_completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) 					       struct xgene_dma_desc_sw *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) 	/* Remove from the list of running transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574) 	list_del(&desc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577) 	 * the client is allowed to attach dependent operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578) 	 * until 'ack' is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580) 	if (!async_tx_test_ack(&desc->tx)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 		 * Move this descriptor to the list of descriptors which is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) 		 * completed, but still awaiting the 'ack' bit to be set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 		list_add_tail(&desc->node, &chan->ld_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	chan_dbg(chan, "LD %p free\n", desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 				    struct xgene_dma_desc_sw *desc_sw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596) 	struct xgene_dma_ring *ring = &chan->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597) 	struct xgene_dma_desc_hw *desc_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599) 	/* Get hw descriptor from DMA tx ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600) 	desc_hw = &ring->desc_hw[ring->head];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603) 	 * Increment the head count to point to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604) 	 * next descriptor for next time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	if (++ring->head == ring->slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 		ring->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) 	/* Copy prepared sw descriptor data to hw descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	memcpy(desc_hw, &desc_sw->desc1, sizeof(*desc_hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	 * Check if we have prepared a 64B descriptor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	 * in that case we need one more hw descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 		desc_hw = &ring->desc_hw[ring->head];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 		if (++ring->head == ring->slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 			ring->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 		memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	/* Increment the pending transaction count */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 	chan->pending += ((desc_sw->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 			  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	/* Notify the hw that we have descriptor ready for execution */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 	iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 		  2 : 1, ring->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) }
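
/*
 * Ring doorbell sketch (illustrative): the engine is kicked by writing the
 * number of newly queued 32B slots to the ring command register:
 *
 *	iowrite32(1, ring->cmd);	- one 32B descriptor queued
 *	iowrite32(2, ring->cmd);	- one 64B (two-slot) descriptor queued
 *
 * and, as done in the cleanup path below, processed rx-ring slots are
 * retired with iowrite32(-1, ring->cmd).
 */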
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635)  * xgene_chan_xfer_ld_pending - push any pending transactions to hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636)  * @chan : X-Gene DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638)  * LOCKING: must hold chan->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	 * If the list of pending descriptors is empty, then we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	 * don't need to do any work at all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 	if (list_empty(&chan->ld_pending)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		chan_dbg(chan, "No pending LDs\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 	 * Move elements from the queue of pending transactions onto the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 	 * of running transactions and push them to the hw for execution
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_pending, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 		 * If we have already pushed as many transactions to the hw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 		 * as it can take, stop here; the remaining elements of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 		 * pending ld queue will be pushed after some of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 		 * descriptors we have already submitted complete
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 		if (chan->pending >= chan->max_outstanding)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 		xgene_chan_xfer_request(chan, desc_sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 		 * Delete this element from ld pending queue and append it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 		 * ld running queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 		list_move_tail(&desc_sw->node, &chan->ld_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) }
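
/*
 * Caller sketch (illustrative): per the LOCKING note above, this helper
 * expects chan->lock to be held, e.g.
 *
 *	spin_lock_bh(&chan->lock);
 *	xgene_chan_xfer_ld_pending(chan);
 *	spin_unlock_bh(&chan->lock);
 *
 * The cleanup path below calls it under spin_lock() from tasklet context.
 */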
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678)  * xgene_dma_cleanup_descriptors - clean up link descriptors which have completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679)  * and move them to ld_completed, to be freed once the 'ack' flag is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680)  * @chan: X-Gene DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682)  * This function is used on descriptors which have been executed by the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683)  * controller. It will run any callbacks, submit any dependencies, then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684)  * free these descriptors if the 'ack' flag is set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686) static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) 	struct xgene_dma_ring *ring = &chan->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	struct xgene_dma_desc_hw *desc_hw;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	struct list_head ld_completed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	u8 status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	INIT_LIST_HEAD(&ld_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	spin_lock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	/* Clean already completed and acked descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	xgene_dma_clean_completed_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	/* Move all completed descriptors to ld completed queue, in order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	list_for_each_entry_safe(desc_sw, _desc_sw, &chan->ld_running, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 		/* Get subsequent hw descriptor from DMA rx ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 		desc_hw = &ring->desc_hw[ring->head];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		/* Check if this descriptor has been completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		if (unlikely(le64_to_cpu(desc_hw->m0) ==
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			     XGENE_DMA_DESC_EMPTY_SIGNATURE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		if (++ring->head == ring->slots)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			ring->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		/* Check if we have any error with DMA transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 		status = XGENE_DMA_DESC_STATUS(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 				XGENE_DMA_DESC_ELERR_RD(le64_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 							desc_hw->m0)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 				XGENE_DMA_DESC_LERR_RD(le64_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 						       desc_hw->m0)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 		if (status) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			/* Print the DMA error type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 			chan_err(chan, "%s\n", xgene_dma_desc_err[status]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 			 * We have a DMA transaction error here. Dump the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 			 * Tx and Rx descriptors for this request */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 			XGENE_DMA_DESC_DUMP(&desc_sw->desc1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 					    "X-Gene DMA TX DESC1: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 			if (desc_sw->flags & XGENE_DMA_FLAG_64B_DESC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 				XGENE_DMA_DESC_DUMP(&desc_sw->desc2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 						    "X-Gene DMA TX DESC2: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			XGENE_DMA_DESC_DUMP(desc_hw,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 					    "X-Gene DMA RX ERR DESC: ");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		/* Notify the hw about this completed descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 		iowrite32(-1, ring->cmd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 		/* Mark this hw descriptor as processed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		desc_hw->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		 * Decrement the pending transaction count
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 		 * as we have processed one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		chan->pending -= ((desc_sw->flags &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 				  XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		 * Delete this node from ld running queue and append it to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		 * ld completed queue for further processing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 		list_move_tail(&desc_sw->node, &ld_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	 * Start any pending transactions automatically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	 * Ideally, we keep the DMA controller busy while we go
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	 * ahead and free the descriptors below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	xgene_chan_xfer_ld_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	spin_unlock(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	/* Run the callback for each descriptor, in order */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		xgene_dma_run_tx_complete_actions(chan, desc_sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		xgene_dma_clean_running_descriptor(chan, desc_sw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	struct xgene_dma_chan *chan = to_dma_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	/* Has this channel already been allocated? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	if (chan->desc_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 					  sizeof(struct xgene_dma_desc_sw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 					  0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	if (!chan->desc_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 		chan_err(chan, "Failed to allocate descriptor pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	chan_dbg(chan, "Allocate descriptor pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796)  * xgene_dma_free_desc_list - Free all descriptors in a queue
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797)  * @chan: X-Gene DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798)  * @list: the list to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800)  * LOCKING: must hold chan->lock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) static void xgene_dma_free_desc_list(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 				     struct list_head *list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	struct xgene_dma_desc_sw *desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	list_for_each_entry_safe(desc, _desc, list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 		xgene_dma_clean_descriptor(chan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	struct xgene_dma_chan *chan = to_dma_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	chan_dbg(chan, "Free all resources\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	if (!chan->desc_pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	/* Process all running descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	xgene_dma_cleanup_descriptors(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	spin_lock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	/* Clean all link descriptor queues */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	xgene_dma_free_desc_list(chan, &chan->ld_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	xgene_dma_free_desc_list(chan, &chan->ld_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	xgene_dma_free_desc_list(chan, &chan->ld_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	spin_unlock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	/* Delete this channel DMA pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	dma_pool_destroy(chan->desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	chan->desc_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) static struct dma_async_tx_descriptor *xgene_dma_prep_xor(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	struct dma_chan *dchan, dma_addr_t dst,	dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	u32 src_cnt, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct xgene_dma_desc_sw *first = NULL, *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct xgene_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 				0x01, 0x01, 0x01, 0x01, 0x01};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	if (unlikely(!dchan || !len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	chan = to_dma_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		/* Allocate the link descriptor from DMA pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 		new = xgene_dma_alloc_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		/* Prepare xor DMA descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		xgene_dma_prep_xor_desc(chan, new, &dst, src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 					src_cnt, &len, multi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			first = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		new->tx.cookie = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		async_tx_ack(&new->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 		/* Insert the link descriptor to the LD ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		list_add_tail(&new->node, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	} while (len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	new->tx.flags = flags; /* client is in control of this ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	new->tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	list_splice(&first->tx_list, &new->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	return &new->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	xgene_dma_free_desc_list(chan, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) }
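
/*
 * Illustrative sketch (not part of the driver proper): roughly how a generic
 * dmaengine client might consume the XOR prep callback above. The channel
 * (assumed to advertise DMA_XOR) and the DMA-mapped addresses are set up by
 * the caller; all names below are hypothetical.
 */
static __maybe_unused int xgene_dma_example_xor(struct dma_chan *chan,
						dma_addr_t dst,
						dma_addr_t *src,
						u32 src_cnt, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* Ask the driver to build the descriptor chain for dst = XOR(src[]) */
	tx = chan->device->device_prep_dma_xor(chan, dst, src, src_cnt,
					       len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	/* Queue the descriptor and kick the channel */
	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;
	dma_async_issue_pending(chan);

	/*
	 * Poll for completion; a real client would normally use a
	 * completion callback instead of busy-waiting here.
	 */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}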
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) static struct dma_async_tx_descriptor *xgene_dma_prep_pq(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	struct xgene_dma_desc_sw *first = NULL, *new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	struct xgene_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	size_t _len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	dma_addr_t _src[XGENE_DMA_MAX_XOR_SRC];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	static u8 multi[XGENE_DMA_MAX_XOR_SRC] = {0x01, 0x01, 0x01, 0x01, 0x01};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	if (unlikely(!dchan || !len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 	chan = to_dma_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	 * Save the source addresses in a local array; we may have to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	 * prepare two descriptors to generate both P and Q if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	 * client enables both in the flags.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	memcpy(_src, src, sizeof(*src) * src_cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	if (flags & DMA_PREP_PQ_DISABLE_P)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	if (flags & DMA_PREP_PQ_DISABLE_Q)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 		_len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		/* Allocate the link descriptor from DMA pool */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		new = xgene_dma_alloc_descriptor(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		if (!new)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 			first = new;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		new->tx.cookie = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		async_tx_ack(&new->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		/* Insert the link descriptor to the LD ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		list_add_tail(&new->node, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		 * Prepare DMA descriptor to generate P,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 		 * if DMA_PREP_PQ_DISABLE_P flag is not set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		if (len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			xgene_dma_prep_xor_desc(chan, new, &dst[0], src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 						src_cnt, &len, multi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		 * Prepare DMA descriptor to generate Q,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 		 * if DMA_PREP_PQ_DISABLE_Q flag is not set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		if (_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			xgene_dma_prep_xor_desc(chan, new, &dst[1], _src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 						src_cnt, &_len, scf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	} while (len || _len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	new->tx.flags = flags; /* client is in control of this ack */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	new->tx.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	list_splice(&first->tx_list, &new->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	return &new->tx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	xgene_dma_free_desc_list(chan, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) }
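
/*
 * Illustrative sketch (not part of the driver proper): a Q-only request as a
 * dmaengine client would issue it. With DMA_PREP_PQ_DISABLE_P set,
 * xgene_dma_prep_pq() above zeroes 'len', so only the Q descriptor chain
 * driven by '_len' and 'scf' is built. The caller is assumed to pass
 * DMA-mapped dst[2] (P, Q) and src[] plus the GF coefficients in scf[].
 */
static __maybe_unused struct dma_async_tx_descriptor *xgene_dma_example_gen_q(
	struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, u32 src_cnt,
	const u8 *scf, size_t len)
{
	return chan->device->device_prep_dma_pq(chan, dst, src, src_cnt, scf,
						len, DMA_PREP_PQ_DISABLE_P |
						DMA_PREP_INTERRUPT);
}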
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) static void xgene_dma_issue_pending(struct dma_chan *dchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	struct xgene_dma_chan *chan = to_dma_chan(dchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	spin_lock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	xgene_chan_xfer_ld_pending(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	spin_unlock_bh(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) static enum dma_status xgene_dma_tx_status(struct dma_chan *dchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 					   dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 					   struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	return dma_cookie_status(dchan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) static void xgene_dma_tasklet_cb(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	struct xgene_dma_chan *chan = from_tasklet(chan, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	/* Run all cleanup for descriptors which have been completed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	xgene_dma_cleanup_descriptors(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	/* Re-enable DMA channel IRQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	enable_irq(chan->rx_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) static irqreturn_t xgene_dma_chan_ring_isr(int irq, void *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	struct xgene_dma_chan *chan = (struct xgene_dma_chan *)id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	BUG_ON(!chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 	 * Disable DMA channel IRQ until we process completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	 * descriptors
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	disable_irq_nosync(chan->rx_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	 * Schedule the tasklet to handle all cleanup of the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	 * transaction. It will start a new transaction if there is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	 * one pending.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	tasklet_schedule(&chan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) static irqreturn_t xgene_dma_err_isr(int irq, void *id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	struct xgene_dma *pdma = (struct xgene_dma *)id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	unsigned long int_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	u32 val, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	val = ioread32(pdma->csr_dma + XGENE_DMA_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	/* Clear DMA interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	iowrite32(val, pdma->csr_dma + XGENE_DMA_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	/* Print DMA error info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	int_mask = val >> XGENE_DMA_INT_MASK_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	for_each_set_bit(i, &int_mask, ARRAY_SIZE(xgene_dma_err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		dev_err(pdma->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			"Interrupt status 0x%08X %s\n", val, xgene_dma_err[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) static void xgene_dma_wr_ring_state(struct xgene_dma_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	iowrite32(ring->num, ring->pdma->csr_ring + XGENE_DMA_RING_STATE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	for (i = 0; i < XGENE_DMA_RING_NUM_CONFIG; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		iowrite32(ring->state[i], ring->pdma->csr_ring +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 			  XGENE_DMA_RING_STATE_WR_BASE + (i * 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) static void xgene_dma_clr_ring_state(struct xgene_dma_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	memset(ring->state, 0, sizeof(u32) * XGENE_DMA_RING_NUM_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	xgene_dma_wr_ring_state(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) static void xgene_dma_setup_ring(struct xgene_dma_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	void *ring_cfg = ring->state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	u64 addr = ring->desc_paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	u32 i, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	ring->slots = ring->size / XGENE_DMA_RING_WQ_DESC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	/* Clear DMA ring state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	xgene_dma_clr_ring_state(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	/* Set DMA ring type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	XGENE_DMA_RING_TYPE_SET(ring_cfg, XGENE_DMA_RING_TYPE_REGULAR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (ring->owner == XGENE_DMA_RING_OWNER_DMA) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		/* Set recombination buffer and timeout */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 		XGENE_DMA_RING_RECOMBBUF_SET(ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 		XGENE_DMA_RING_RECOMTIMEOUTL_SET(ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		XGENE_DMA_RING_RECOMTIMEOUTH_SET(ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	/* Initialize DMA ring state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	XGENE_DMA_RING_SELTHRSH_SET(ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 	XGENE_DMA_RING_ACCEPTLERR_SET(ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	XGENE_DMA_RING_COHERENT_SET(ring_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	XGENE_DMA_RING_ADDRL_SET(ring_cfg, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	XGENE_DMA_RING_ADDRH_SET(ring_cfg, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	XGENE_DMA_RING_SIZE_SET(ring_cfg, ring->cfgsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	/* Write DMA ring configurations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 	xgene_dma_wr_ring_state(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	/* Set DMA ring id */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	iowrite32(XGENE_DMA_RING_ID_SETUP(ring->id),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 		  ring->pdma->csr_ring + XGENE_DMA_RING_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	/* Set DMA ring buffer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	iowrite32(XGENE_DMA_RING_ID_BUF_SETUP(ring->num),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		  ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	if (ring->owner != XGENE_DMA_RING_OWNER_CPU)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	/* Set empty signature to DMA Rx ring descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	for (i = 0; i < ring->slots; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		struct xgene_dma_desc_hw *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		desc = &ring->desc_hw[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 		desc->m0 = cpu_to_le64(XGENE_DMA_DESC_EMPTY_SIGNATURE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	/* Enable DMA Rx ring interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	val = ioread32(ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	XGENE_DMA_RING_NE_INT_MODE_SET(val, ring->buf_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 	iowrite32(val, ring->pdma->csr_ring + XGENE_DMA_RING_NE_INT_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) static void xgene_dma_clear_ring(struct xgene_dma_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	u32 ring_id, val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	if (ring->owner == XGENE_DMA_RING_OWNER_CPU) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		/* Disable DMA Rx ring interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		val = ioread32(ring->pdma->csr_ring +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			       XGENE_DMA_RING_NE_INT_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 		XGENE_DMA_RING_NE_INT_MODE_RESET(val, ring->buf_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 		iowrite32(val, ring->pdma->csr_ring +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 			  XGENE_DMA_RING_NE_INT_MODE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	/* Clear DMA ring state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 	ring_id = XGENE_DMA_RING_ID_SETUP(ring->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	iowrite32(ring_id, ring->pdma->csr_ring + XGENE_DMA_RING_ID);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	iowrite32(0, ring->pdma->csr_ring + XGENE_DMA_RING_ID_BUF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	xgene_dma_clr_ring_state(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static void xgene_dma_set_ring_cmd(struct xgene_dma_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	ring->cmd_base = ring->pdma->csr_ring_cmd +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 				XGENE_DMA_RING_CMD_BASE_OFFSET((ring->num -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 							  XGENE_DMA_RING_NUM));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	ring->cmd = ring->cmd_base + XGENE_DMA_RING_CMD_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) static int xgene_dma_get_ring_size(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 				   enum xgene_dma_ring_cfgsize cfgsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	int size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	switch (cfgsize) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	case XGENE_DMA_RING_CFG_SIZE_512B:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 		size = 0x200;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	case XGENE_DMA_RING_CFG_SIZE_2KB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 		size = 0x800;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	case XGENE_DMA_RING_CFG_SIZE_16KB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		size = 0x4000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	case XGENE_DMA_RING_CFG_SIZE_64KB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		size = 0x10000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	case XGENE_DMA_RING_CFG_SIZE_512KB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		size = 0x80000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		chan_err(chan, "Unsupported cfg ring size %d\n", cfgsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
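
/*
 * Worked example (illustrative note): xgene_dma_setup_ring() above derives
 * the slot count as ring->size / XGENE_DMA_RING_WQ_DESC_SIZE. Assuming the
 * 32-byte work-queue descriptor used by this driver, the 64KB configuration
 * requested by xgene_dma_create_chan_rings() below yields
 * 0x10000 / 32 = 2048 slots, which also becomes the channel's
 * max_outstanding limit.
 */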
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) static void xgene_dma_delete_ring_one(struct xgene_dma_ring *ring)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	/* Clear DMA ring configurations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	xgene_dma_clear_ring(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	/* De-allocate DMA ring descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	if (ring->desc_vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		dma_free_coherent(ring->pdma->dev, ring->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				  ring->desc_vaddr, ring->desc_paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		ring->desc_vaddr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) static void xgene_dma_delete_chan_rings(struct xgene_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	xgene_dma_delete_ring_one(&chan->rx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	xgene_dma_delete_ring_one(&chan->tx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 				     struct xgene_dma_ring *ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 				     enum xgene_dma_ring_cfgsize cfgsize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	/* Setup DMA ring descriptor variables */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	ring->pdma = chan->pdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	ring->cfgsize = cfgsize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	ring->num = chan->pdma->ring_num++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	ret = xgene_dma_get_ring_size(chan, cfgsize);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	if (ret <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	ring->size = ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	/* Allocate memory for DMA ring descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	ring->desc_vaddr = dma_alloc_coherent(chan->dev, ring->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 					      &ring->desc_paddr, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	if (!ring->desc_vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		chan_err(chan, "Failed to allocate ring desc\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	/* Configure and enable DMA ring */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	xgene_dma_set_ring_cmd(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	xgene_dma_setup_ring(ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	struct xgene_dma_ring *rx_ring = &chan->rx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	struct xgene_dma_ring *tx_ring = &chan->tx_ring;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	/* Create DMA Rx ring descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	rx_ring->owner = XGENE_DMA_RING_OWNER_CPU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	rx_ring->buf_num = XGENE_DMA_CPU_BUFNUM + chan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	ret = xgene_dma_create_ring_one(chan, rx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 					XGENE_DMA_RING_CFG_SIZE_64KB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	chan_dbg(chan, "Rx ring id 0x%X num %d desc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 		 rx_ring->id, rx_ring->num, rx_ring->desc_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	/* Create DMA Tx ring descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	tx_ring->owner = XGENE_DMA_RING_OWNER_DMA;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	tx_ring->buf_num = XGENE_DMA_BUFNUM + chan->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	ret = xgene_dma_create_ring_one(chan, tx_ring,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 					XGENE_DMA_RING_CFG_SIZE_64KB);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		xgene_dma_delete_ring_one(rx_ring);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	tx_ring->dst_ring_num = XGENE_DMA_RING_DST_ID(rx_ring->num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	chan_dbg(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 		 "Tx ring id 0x%X num %d desc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		 tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 	/* Set the maximum outstanding requests possible for this channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	chan->max_outstanding = tx_ring->slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) static int xgene_dma_init_rings(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	int ret, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		ret = xgene_dma_create_chan_rings(&pdma->chan[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 			for (j = 0; j < i; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 				xgene_dma_delete_chan_rings(&pdma->chan[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) static void xgene_dma_enable(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	/* Configure and enable DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	XGENE_DMA_CH_SETUP(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	XGENE_DMA_ENABLE(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) static void xgene_dma_disable(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	val = ioread32(pdma->csr_dma + XGENE_DMA_GCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	XGENE_DMA_DISABLE(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	iowrite32(val, pdma->csr_dma + XGENE_DMA_GCR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static void xgene_dma_mask_interrupts(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	 * Mask DMA ring overflow, underflow and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	 * AXI write/read error interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	iowrite32(XGENE_DMA_INT_ALL_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 		  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	iowrite32(XGENE_DMA_INT_ALL_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	iowrite32(XGENE_DMA_INT_ALL_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 		  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	iowrite32(XGENE_DMA_INT_ALL_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	iowrite32(XGENE_DMA_INT_ALL_MASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	/* Mask DMA error interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	iowrite32(XGENE_DMA_INT_ALL_MASK, pdma->csr_dma + XGENE_DMA_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void xgene_dma_unmask_interrupts(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	 * Unmask DMA ring overflow, underflow and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	 * AXI write/read error interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		  pdma->csr_dma + XGENE_DMA_RING_INT0_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 		  pdma->csr_dma + XGENE_DMA_RING_INT1_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		  pdma->csr_dma + XGENE_DMA_RING_INT2_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		  pdma->csr_dma + XGENE_DMA_RING_INT3_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		  pdma->csr_dma + XGENE_DMA_RING_INT4_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	/* Unmask DMA error interrupts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	iowrite32(XGENE_DMA_INT_ALL_UNMASK,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		  pdma->csr_dma + XGENE_DMA_INT_MASK);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) static void xgene_dma_init_hw(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	/* Associate DMA ring to corresponding ring HW */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	iowrite32(XGENE_DMA_ASSOC_RING_MNGR1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		  pdma->csr_dma + XGENE_DMA_CFG_RING_WQ_ASSOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	/* Configure RAID6 polynomial control setting */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	if (is_pq_enabled(pdma))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		iowrite32(XGENE_DMA_RAID6_MULTI_CTRL(0x1D),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 			  pdma->csr_dma + XGENE_DMA_RAID6_CONT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		dev_info(pdma->dev, "PQ is disabled in HW\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	xgene_dma_enable(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	xgene_dma_unmask_interrupts(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	/* Get DMA id and version info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	val = ioread32(pdma->csr_dma + XGENE_DMA_IPBRR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	/* DMA device info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	dev_info(pdma->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		 "X-Gene DMA v%d.%02d.%02d driver registered %d channels\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		 XGENE_DMA_REV_NO_RD(val), XGENE_DMA_BUS_ID_RD(val),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		 XGENE_DMA_DEV_ID_RD(val), XGENE_DMA_MAX_CHANNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) static int xgene_dma_init_ring_mngr(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_CLKEN) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	    (!ioread32(pdma->csr_ring + XGENE_DMA_RING_SRST)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	iowrite32(0x3, pdma->csr_ring + XGENE_DMA_RING_CLKEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_SRST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	/* Bring up memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	iowrite32(0x0, pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	/* Force a barrier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	ioread32(pdma->csr_ring + XGENE_DMA_RING_MEM_RAM_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	/* reset may take up to 1ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	usleep_range(1000, 1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	if (ioread32(pdma->csr_ring + XGENE_DMA_RING_BLK_MEM_RDY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 		!= XGENE_DMA_RING_BLK_MEM_RDY_VAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		dev_err(pdma->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 			"Failed to release ring mngr memory from shutdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	/* program threshold set 1 and all hysteresis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	iowrite32(XGENE_DMA_RING_THRESLD0_SET1_VAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		  pdma->csr_ring + XGENE_DMA_RING_THRESLD0_SET1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	iowrite32(XGENE_DMA_RING_THRESLD1_SET1_VAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		  pdma->csr_ring + XGENE_DMA_RING_THRESLD1_SET1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	iowrite32(XGENE_DMA_RING_HYSTERESIS_VAL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		  pdma->csr_ring + XGENE_DMA_RING_HYSTERESIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	/* Enable QPcore and assign error queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	iowrite32(XGENE_DMA_RING_ENABLE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 		  pdma->csr_ring + XGENE_DMA_RING_CONFIG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) static int xgene_dma_init_mem(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	ret = xgene_dma_init_ring_mngr(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	/* Bring up memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	iowrite32(0x0, pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	/* Force a barrier */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	ioread32(pdma->csr_dma + XGENE_DMA_MEM_RAM_SHUTDOWN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	/* reset may take up to 1ms */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 	usleep_range(1000, 1100);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	if (ioread32(pdma->csr_dma + XGENE_DMA_BLK_MEM_RDY)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		!= XGENE_DMA_BLK_MEM_RDY_VAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 		dev_err(pdma->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 			"Failed to release DMA memory from shutdown\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) static int xgene_dma_request_irqs(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	struct xgene_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	int ret, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	/* Register DMA error irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	ret = devm_request_irq(pdma->dev, pdma->err_irq, xgene_dma_err_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 			       0, "dma_error", pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		dev_err(pdma->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 			"Failed to register error IRQ %d\n", pdma->err_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	/* Register DMA channel rx irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		chan = &pdma->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 		ret = devm_request_irq(chan->dev, chan->rx_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 				       xgene_dma_chan_ring_isr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 				       0, chan->name, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 			chan_err(chan, "Failed to register Rx IRQ %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 				 chan->rx_irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 			devm_free_irq(pdma->dev, pdma->err_irq, pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 			for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 				chan = &pdma->chan[j];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 				irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 				devm_free_irq(chan->dev, chan->rx_irq, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) static void xgene_dma_free_irqs(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	struct xgene_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	/* Free DMA device error irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	devm_free_irq(pdma->dev, pdma->err_irq, pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		chan = &pdma->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		devm_free_irq(chan->dev, chan->rx_irq, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 			       struct dma_device *dma_dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	/* Initialize DMA device capability mask */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 	dma_cap_zero(dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	/* Set DMA device capability */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	 * The X-Gene SoC DMA engine supports XOR on channel 0, and both
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	 * XOR and PQ on channel 1. The hardware provides a mechanism to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	 * enable/disable PQ/XOR support on channel 1; we can check this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	 * by reading the SoC efuse register. There is also a hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 	 * erratum: running XOR on channel 0 and PQ on channel 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 	 * simultaneously can hang the DMA engine, so we enable XOR on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 	 * channel 0 only if XOR/PQ support on channel 1 is disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 	if ((chan->id == XGENE_DMA_PQ_CHANNEL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	    is_pq_enabled(chan->pdma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 		dma_cap_set(DMA_PQ, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	} else if ((chan->id == XGENE_DMA_XOR_CHANNEL) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 		   !is_pq_enabled(chan->pdma)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 		dma_cap_set(DMA_XOR, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 	/* Set base and prep routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 	dma_dev->dev = chan->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 	dma_dev->device_alloc_chan_resources = xgene_dma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	dma_dev->device_issue_pending = xgene_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 	dma_dev->device_tx_status = xgene_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 		dma_dev->device_prep_dma_xor = xgene_dma_prep_xor;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		dma_dev->max_xor = XGENE_DMA_MAX_XOR_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		dma_dev->xor_align = DMAENGINE_ALIGN_64_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 		dma_dev->device_prep_dma_pq = xgene_dma_prep_pq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 		dma_dev->max_pq = XGENE_DMA_MAX_XOR_SRC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 		dma_dev->pq_align = DMAENGINE_ALIGN_64_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) }
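
/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * once xgene_dma_set_caps() has advertised DMA_XOR/DMA_PQ as above, a
 * generic dmaengine client could locate such a channel with the standard
 * <linux/dmaengine.h> helpers used below. The function name and placement
 * are hypothetical; error handling is trimmed.
 */
static __maybe_unused struct dma_chan *example_find_xor_chan(void)
{
	struct dma_chan *chan;

	dmaengine_get();			/* pin the dmaengine channel table */
	chan = dma_find_channel(DMA_XOR);	/* any channel advertising DMA_XOR */
	if (!chan)
		dmaengine_put();		/* nothing suitable is registered */

	return chan;				/* caller balances with dmaengine_put() */
}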
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 	struct xgene_dma_chan *chan = &pdma->chan[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	struct dma_device *dma_dev = &pdma->dma_dev[id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	chan->dma_chan.device = dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	spin_lock_init(&chan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) 	INIT_LIST_HEAD(&chan->ld_pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 	INIT_LIST_HEAD(&chan->ld_running);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) 	INIT_LIST_HEAD(&chan->ld_completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) 	tasklet_setup(&chan->tasklet, xgene_dma_tasklet_cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 	chan->pending = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	chan->desc_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	dma_cookie_init(&chan->dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	/* Set up DMA device capabilities and prep routines */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	xgene_dma_set_caps(chan, dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	/* Initialize DMA device list head */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	INIT_LIST_HEAD(&dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	list_add_tail(&chan->dma_chan.device_node, &dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 	/* Register with Linux async DMA framework */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	ret = dma_async_device_register(dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		chan_err(chan, "Failed to register async device %d", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		tasklet_kill(&chan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	/* DMA capability info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	dev_info(pdma->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		 "%s: CAPABILITY ( %s%s)\n", dma_chan_name(&chan->dma_chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) static int xgene_dma_init_async(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 	int ret, i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		ret = xgene_dma_async_register(pdma, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 			for (j = 0; j < i; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 				dma_async_device_unregister(&pdma->dma_dev[j]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 				tasklet_kill(&pdma->chan[j].tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) static void xgene_dma_async_unregister(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 		dma_async_device_unregister(&pdma->dma_dev[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) static void xgene_dma_init_channels(struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	struct xgene_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	pdma->ring_num = XGENE_DMA_RING_NUM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 		chan = &pdma->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 		chan->dev = pdma->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		chan->pdma = pdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		chan->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		snprintf(chan->name, sizeof(chan->name), "dmachan%d", chan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) static int xgene_dma_get_resources(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 				   struct xgene_dma *pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	int irq, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	/* Get DMA csr region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		dev_err(&pdev->dev, "Failed to get csr region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 	pdma->csr_dma = devm_ioremap(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 				     resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 	if (!pdma->csr_dma) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 		dev_err(&pdev->dev, "Failed to ioremap csr region");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 	/* Get DMA ring csr region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		dev_err(&pdev->dev, "Failed to get ring csr region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	pdma->csr_ring = devm_ioremap(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 				       resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 	if (!pdma->csr_ring) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 		dev_err(&pdev->dev, "Failed to ioremap ring csr region");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	/* Get DMA ring cmd csr region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		dev_err(&pdev->dev, "Failed to get ring cmd csr region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 	pdma->csr_ring_cmd = devm_ioremap(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 					  resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	if (!pdma->csr_ring_cmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 		dev_err(&pdev->dev, "Failed to ioremap ring cmd csr region");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	/* Get efuse csr region */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	if (!res) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		dev_err(&pdev->dev, "Failed to get efuse csr region\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	pdma->csr_efuse = devm_ioremap(&pdev->dev, res->start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 				       resource_size(res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	if (!pdma->csr_efuse) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		dev_err(&pdev->dev, "Failed to ioremap efuse csr region");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 	/* Get DMA error interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 	irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	if (irq <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 		return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 	pdma->err_irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	/* Get DMA Rx ring descriptor interrupts for all DMA channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 	for (i = 1; i <= XGENE_DMA_MAX_CHANNEL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 		irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 		if (irq <= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 			return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 		pdma->chan[i - 1].rx_irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) }
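
/*
 * Editor's note -- illustrative sketch, not part of the original driver:
 * each platform_get_resource()/devm_ioremap() pair above could also be
 * written with the devm_platform_ioremap_resource() helper available in
 * this kernel series, shown here for the DMA csr region (index 0) only.
 * Note it is not a drop-in replacement: unlike devm_ioremap(), the helper
 * also request_mem_region()s the range, which matters if a region is
 * shared with another driver. The function name is hypothetical.
 */
static int __maybe_unused example_map_dma_csr(struct platform_device *pdev,
					      struct xgene_dma *pdma)
{
	/* index 0 is the DMA csr region, as in xgene_dma_get_resources() */
	pdma->csr_dma = devm_platform_ioremap_resource(pdev, 0);

	return PTR_ERR_OR_ZERO(pdma->csr_dma);
}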
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) static int xgene_dma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 	struct xgene_dma *pdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 	pdma = devm_kzalloc(&pdev->dev, sizeof(*pdma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	if (!pdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	pdma->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 	platform_set_drvdata(pdev, pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 	ret = xgene_dma_get_resources(pdev, pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 	pdma->clk = devm_clk_get(&pdev->dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	if (IS_ERR(pdma->clk) && !ACPI_COMPANION(&pdev->dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 		dev_err(&pdev->dev, "Failed to get clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 		return PTR_ERR(pdma->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 	/* Enable clk before accessing registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	if (!IS_ERR(pdma->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 		ret = clk_prepare_enable(pdma->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			dev_err(&pdev->dev, "Failed to enable clk %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	/* Bring DMA RAM out of shutdown */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	ret = xgene_dma_init_mem(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		goto err_clk_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(42));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 		dev_err(&pdev->dev, "No usable DMA configuration\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 		goto err_dma_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	/* Initialize DMA channels software state */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	xgene_dma_init_channels(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	/* Configure DMA rings */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	ret = xgene_dma_init_rings(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		goto err_clk_enable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	ret = xgene_dma_request_irqs(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		goto err_request_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	/* Configure and enable DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	xgene_dma_init_hw(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 	/* Register DMA device with linux async framework */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	ret = xgene_dma_init_async(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 		goto err_async_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) err_async_init:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	xgene_dma_free_irqs(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) err_request_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 		xgene_dma_delete_chan_rings(&pdma->chan[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) err_dma_mask:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) err_clk_enable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 	if (!IS_ERR(pdma->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 		clk_disable_unprepare(pdma->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) static int xgene_dma_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	struct xgene_dma *pdma = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	struct xgene_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	xgene_dma_async_unregister(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	/* Mask interrupts and disable DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	xgene_dma_mask_interrupts(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	xgene_dma_disable(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	xgene_dma_free_irqs(pdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		chan = &pdma->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 		tasklet_kill(&chan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 		xgene_dma_delete_chan_rings(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	if (!IS_ERR(pdma->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 		clk_disable_unprepare(pdma->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) #ifdef CONFIG_ACPI
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) static const struct acpi_device_id xgene_dma_acpi_match_ptr[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	{"APMC0D43", 0},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) MODULE_DEVICE_TABLE(acpi, xgene_dma_acpi_match_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) static const struct of_device_id xgene_dma_of_match_ptr[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	{.compatible = "apm,xgene-storm-dma",},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) MODULE_DEVICE_TABLE(of, xgene_dma_of_match_ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) static struct platform_driver xgene_dma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) 	.probe = xgene_dma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) 	.remove = xgene_dma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 		.name = "X-Gene-DMA",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 		.of_match_table = xgene_dma_of_match_ptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 		.acpi_match_table = ACPI_PTR(xgene_dma_acpi_match_ptr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) module_platform_driver(xgene_dma_driver);
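
/*
 * Editor's note -- for reference, module_platform_driver() above expands to
 * roughly the standard boilerplate below (a sketch of the generic macro
 * expansion, not additional code this driver needs):
 *
 *	static int __init xgene_dma_driver_init(void)
 *	{
 *		return platform_driver_register(&xgene_dma_driver);
 *	}
 *	module_init(xgene_dma_driver_init);
 *
 *	static void __exit xgene_dma_driver_exit(void)
 *	{
 *		platform_driver_unregister(&xgene_dma_driver);
 *	}
 *	module_exit(xgene_dma_driver_exit);
 */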
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) MODULE_DESCRIPTION("APM X-Gene SoC DMA driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) MODULE_AUTHOR("Rameshwar Prasad Sahu <rsahu@apm.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) MODULE_AUTHOR("Loc Ho <lho@apm.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) MODULE_LICENSE("GPL");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) MODULE_VERSION("1.0");