Orange Pi 5 kernel

Deprecated Linux 5.10.110 kernel sources for the Orange Pi 5, 5B, and 5 Plus boards. The file below is at_hdmac_regs.h (drivers/dma/ in the kernel tree), the register and descriptor definitions header of the Atmel AHB DMA Controller (HDMAC) driver.

/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Header file for the Atmel AHB DMA Controller driver
 *
 * Copyright (C) 2008 Atmel Corporation
 */
#ifndef AT_HDMAC_REGS_H
#define	AT_HDMAC_REGS_H

#include <linux/platform_data/dma-atmel.h>

#define	AT_DMA_MAX_NR_CHANNELS	8


#define	AT_DMA_GCFG	0x00	/* Global Configuration Register */
#define		AT_DMA_IF_BIGEND(i)	(0x1 << (i))	/* AHB-Lite Interface i in Big-endian mode */
#define		AT_DMA_ARB_CFG	(0x1 << 4)	/* Arbiter mode. */
#define			AT_DMA_ARB_CFG_FIXED		(0x0 << 4)
#define			AT_DMA_ARB_CFG_ROUND_ROBIN	(0x1 << 4)

#define	AT_DMA_EN	0x04	/* Controller Enable Register */
#define		AT_DMA_ENABLE	(0x1 << 0)
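
/*
 * Usage sketch (illustrative only; "atdma" is a hypothetical struct at_dma
 * pointer): the arbiter mode can be selected and the controller enabled
 * through the dma_writel() accessor defined later in this file:
 *
 *	dma_writel(atdma, GCFG, AT_DMA_ARB_CFG_ROUND_ROBIN);
 *	dma_writel(atdma, EN, AT_DMA_ENABLE);
 */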

#define	AT_DMA_SREQ	0x08	/* Software Single Request Register */
#define		AT_DMA_SSREQ(x)	(0x1 << ((x) << 1))		/* Request a source single transfer on channel x */
#define		AT_DMA_DSREQ(x)	(0x1 << (1 + ((x) << 1)))	/* Request a destination single transfer on channel x */

#define	AT_DMA_CREQ	0x0C	/* Software Chunk Transfer Request Register */
#define		AT_DMA_SCREQ(x)	(0x1 << ((x) << 1))		/* Request a source chunk transfer on channel x */
#define		AT_DMA_DCREQ(x)	(0x1 << (1 + ((x) << 1)))	/* Request a destination chunk transfer on channel x */

#define	AT_DMA_LAST	0x10	/* Software Last Transfer Flag Register */
#define		AT_DMA_SLAST(x)	(0x1 << ((x) << 1))		/* This src rq is last tx of buffer on channel x */
#define		AT_DMA_DLAST(x)	(0x1 << (1 + ((x) << 1)))	/* This dst rq is last tx of buffer on channel x */

#define	AT_DMA_SYNC	0x14	/* Request Synchronization Register */
#define		AT_DMA_SYR(h)	(0x1 << (h))			/* Synchronize handshake line h */

/* Error, Chained Buffer transfer completed and Buffer transfer completed Interrupt registers */
#define	AT_DMA_EBCIER	0x18	/* Enable register */
#define	AT_DMA_EBCIDR	0x1C	/* Disable register */
#define	AT_DMA_EBCIMR	0x20	/* Mask Register */
#define	AT_DMA_EBCISR	0x24	/* Status Register */
#define		AT_DMA_CBTC_OFFSET	8
#define		AT_DMA_ERR_OFFSET	16
#define		AT_DMA_BTC(x)	(0x1 << (x))
#define		AT_DMA_CBTC(x)	(0x1 << (AT_DMA_CBTC_OFFSET + (x)))
#define		AT_DMA_ERR(x)	(0x1 << (AT_DMA_ERR_OFFSET + (x)))
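
/*
 * Usage sketch (illustrative; "atdma" and "chan_id" are hypothetical locals):
 * an interrupt handler can read EBCISR once and then test the per-channel
 * bits, e.g.:
 *
 *	u32 status = dma_readl(atdma, EBCISR);
 *
 *	if (status & AT_DMA_ERR(chan_id))
 *		...handle a transfer error on channel chan_id...
 *	else if (status & AT_DMA_BTC(chan_id))
 *		...a buffer transfer completed on channel chan_id...
 */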

#define	AT_DMA_CHER	0x28	/* Channel Handler Enable Register */
#define		AT_DMA_ENA(x)	(0x1 << (x))
#define		AT_DMA_SUSP(x)	(0x1 << ( 8 + (x)))
#define		AT_DMA_KEEP(x)	(0x1 << (24 + (x)))

#define	AT_DMA_CHDR	0x2C	/* Channel Handler Disable Register */
#define		AT_DMA_DIS(x)	(0x1 << (x))
#define		AT_DMA_RES(x)	(0x1 << ( 8 + (x)))

#define	AT_DMA_CHSR	0x30	/* Channel Handler Status Register */
#define		AT_DMA_EMPT(x)	(0x1 << (16 + (x)))
#define		AT_DMA_STAL(x)	(0x1 << (24 + (x)))


#define	AT_DMA_CH_REGS_BASE	0x3C	/* Channel registers base address */
#define	ch_regs(x)	(AT_DMA_CH_REGS_BASE + (x) * 0x28) /* Channel x base addr */
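
/*
 * Worked example: each channel owns a 0x28-byte register window, so
 * channel 2's window starts at ch_regs(2) = 0x3C + 2 * 0x28 = 0x8C, and its
 * individual registers sit at 0x8C plus the per-channel offsets defined
 * below (e.g. 0x8C + ATC_SADDR_OFFSET for the source address register).
 */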

/* Hardware register offset for each channel */
#define	ATC_SADDR_OFFSET	0x00	/* Source Address Register */
#define	ATC_DADDR_OFFSET	0x04	/* Destination Address Register */
#define	ATC_DSCR_OFFSET		0x08	/* Descriptor Address Register */
#define	ATC_CTRLA_OFFSET	0x0C	/* Control A Register */
#define	ATC_CTRLB_OFFSET	0x10	/* Control B Register */
#define	ATC_CFG_OFFSET		0x14	/* Configuration Register */
#define	ATC_SPIP_OFFSET		0x18	/* Src PIP Configuration Register */
#define	ATC_DPIP_OFFSET		0x1C	/* Dst PIP Configuration Register */


/* Bitfield definitions */

/* Bitfields in DSCR */
#define	ATC_DSCR_IF(i)		(0x3 & (i))	/* Descriptor fetched via AHB-Lite Interface i */

/* Bitfields in CTRLA */
#define	ATC_BTSIZE_MAX		0xFFFFUL	/* Maximum Buffer Transfer Size */
#define	ATC_BTSIZE(x)		(ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
#define	ATC_SCSIZE_MASK		(0x7 << 16)	/* Source Chunk Transfer Size */
#define		ATC_SCSIZE(x)		(ATC_SCSIZE_MASK & ((x) << 16))
#define		ATC_SCSIZE_1		(0x0 << 16)
#define		ATC_SCSIZE_4		(0x1 << 16)
#define		ATC_SCSIZE_8		(0x2 << 16)
#define		ATC_SCSIZE_16		(0x3 << 16)
#define		ATC_SCSIZE_32		(0x4 << 16)
#define		ATC_SCSIZE_64		(0x5 << 16)
#define		ATC_SCSIZE_128		(0x6 << 16)
#define		ATC_SCSIZE_256		(0x7 << 16)
#define	ATC_DCSIZE_MASK		(0x7 << 20)	/* Destination Chunk Transfer Size */
#define		ATC_DCSIZE(x)		(ATC_DCSIZE_MASK & ((x) << 20))
#define		ATC_DCSIZE_1		(0x0 << 20)
#define		ATC_DCSIZE_4		(0x1 << 20)
#define		ATC_DCSIZE_8		(0x2 << 20)
#define		ATC_DCSIZE_16		(0x3 << 20)
#define		ATC_DCSIZE_32		(0x4 << 20)
#define		ATC_DCSIZE_64		(0x5 << 20)
#define		ATC_DCSIZE_128		(0x6 << 20)
#define		ATC_DCSIZE_256		(0x7 << 20)
#define	ATC_SRC_WIDTH_MASK	(0x3 << 24)	/* Source Single Transfer Size */
#define		ATC_SRC_WIDTH(x)	((x) << 24)
#define		ATC_SRC_WIDTH_BYTE	(0x0 << 24)
#define		ATC_SRC_WIDTH_HALFWORD	(0x1 << 24)
#define		ATC_SRC_WIDTH_WORD	(0x2 << 24)
#define		ATC_REG_TO_SRC_WIDTH(r)	(((r) >> 24) & 0x3)
#define	ATC_DST_WIDTH_MASK	(0x3 << 28)	/* Destination Single Transfer Size */
#define		ATC_DST_WIDTH(x)	((x) << 28)
#define		ATC_DST_WIDTH_BYTE	(0x0 << 28)
#define		ATC_DST_WIDTH_HALFWORD	(0x1 << 28)
#define		ATC_DST_WIDTH_WORD	(0x2 << 28)
#define	ATC_DONE		(0x1 << 31)	/* Tx Done (only written back in descriptor) */
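
/*
 * Usage sketch (illustrative; "len" and "ctrla" are hypothetical locals):
 * a word-aligned memcpy descriptor would typically encode CTRLA as
 *
 *	ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD
 *	      | ATC_BTSIZE(len >> 2);
 *
 * BTSIZE counts transfers of the source width, so the byte length is
 * divided by the 4-byte source transfer size here.
 */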

/* Bitfields in CTRLB */
#define	ATC_SIF(i)		(0x3 & (i))	/* Src tx done via AHB-Lite Interface i */
#define	ATC_DIF(i)		((0x3 & (i)) <<  4)	/* Dst tx done via AHB-Lite Interface i */
				  /* Specify AHB interfaces */
#define AT_DMA_MEM_IF		0 /* interface 0 as memory interface */
#define AT_DMA_PER_IF		1 /* interface 1 as peripheral interface */

#define	ATC_SRC_PIP		(0x1 <<  8)	/* Source Picture-in-Picture enabled */
#define	ATC_DST_PIP		(0x1 << 12)	/* Destination Picture-in-Picture enabled */
#define	ATC_SRC_DSCR_DIS	(0x1 << 16)	/* Src Descriptor fetch disable */
#define	ATC_DST_DSCR_DIS	(0x1 << 20)	/* Dst Descriptor fetch disable */
#define	ATC_FC_MASK		(0x7 << 21)	/* Choose Flow Controller */
#define		ATC_FC_MEM2MEM		(0x0 << 21)	/* Mem-to-Mem (DMA) */
#define		ATC_FC_MEM2PER		(0x1 << 21)	/* Mem-to-Periph (DMA) */
#define		ATC_FC_PER2MEM		(0x2 << 21)	/* Periph-to-Mem (DMA) */
#define		ATC_FC_PER2PER		(0x3 << 21)	/* Periph-to-Periph (DMA) */
#define		ATC_FC_PER2MEM_PER	(0x4 << 21)	/* Periph-to-Mem (Peripheral) */
#define		ATC_FC_MEM2PER_PER	(0x5 << 21)	/* Mem-to-Periph (Peripheral) */
#define		ATC_FC_PER2PER_SRCPER	(0x6 << 21)	/* Periph-to-Periph (Src Peripheral) */
#define		ATC_FC_PER2PER_DSTPER	(0x7 << 21)	/* Periph-to-Periph (Dst Peripheral) */
#define	ATC_SRC_ADDR_MODE_MASK	(0x3 << 24)
#define		ATC_SRC_ADDR_MODE_INCR	(0x0 << 24)	/* Incrementing Mode */
#define		ATC_SRC_ADDR_MODE_DECR	(0x1 << 24)	/* Decrementing Mode */
#define		ATC_SRC_ADDR_MODE_FIXED	(0x2 << 24)	/* Fixed Mode */
#define	ATC_DST_ADDR_MODE_MASK	(0x3 << 28)
#define		ATC_DST_ADDR_MODE_INCR	(0x0 << 28)	/* Incrementing Mode */
#define		ATC_DST_ADDR_MODE_DECR	(0x1 << 28)	/* Decrementing Mode */
#define		ATC_DST_ADDR_MODE_FIXED	(0x2 << 28)	/* Fixed Mode */
#define	ATC_IEN			(0x1 << 30)	/* BTC interrupt enable (active low) */
#define	ATC_AUTO		(0x1 << 31)	/* Auto multiple buffer tx enable */
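
/*
 * Usage sketch (illustrative; "ctrlb" is a hypothetical local): a plain
 * memory-to-memory descriptor could set CTRLB to
 *
 *	ctrlb = ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_MEM_IF)
 *	      | ATC_FC_MEM2MEM
 *	      | ATC_SRC_ADDR_MODE_INCR | ATC_DST_ADDR_MODE_INCR
 *	      | ATC_IEN;
 *
 * Note that ATC_IEN is active low: setting it here suppresses the
 * buffer-complete interrupt on intermediate descriptors, while
 * set_desc_eol() below clears it again on the final descriptor of a chain.
 */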

/* Bitfields in CFG are in at_hdmac.h */

/* Bitfields in SPIP */
#define	ATC_SPIP_HOLE(x)	(0xFFFFU & (x))
#define	ATC_SPIP_BOUNDARY(x)	((0x3FF & (x)) << 16)

/* Bitfields in DPIP */
#define	ATC_DPIP_HOLE(x)	(0xFFFFU & (x))
#define	ATC_DPIP_BOUNDARY(x)	((0x3FF & (x)) << 16)


/*--  descriptors  -----------------------------------------------------*/

/* LLI == Linked List Item; aka DMA buffer descriptor */
struct at_lli {
	/* values that are not changed by hardware */
	dma_addr_t	saddr;
	dma_addr_t	daddr;
	/* value that may get written back: */
	u32		ctrla;
	/* more values that are not changed by hardware */
	u32		ctrlb;
	dma_addr_t	dscr;	/* chain to next lli */
};

/**
 * struct at_desc - software descriptor
 * @lli: hardware lli structure
 * @tx_list: list of child descriptors making up this transaction
 * @txd: support for the async_tx api
 * @desc_node: node on the channel's descriptors list
 * @len: descriptor byte count
 * @total_len: total transaction byte count
 * @boundary: interleaved transfer chunk boundary, in transfer units
 * @dst_hole: destination gap (hole) applied at each boundary
 * @src_hole: source gap (hole) applied at each boundary
 * @memset_buffer: true when a temporary memset buffer is in use
 * @memset_paddr: DMA address of the memset temporary buffer
 * @memset_vaddr: CPU address of the memset temporary buffer
 */
struct at_desc {
	/* FIRST values the hardware uses */
	struct at_lli			lli;

	/* THEN values for driver housekeeping */
	struct list_head		tx_list;
	struct dma_async_tx_descriptor	txd;
	struct list_head		desc_node;
	size_t				len;
	size_t				total_len;

	/* Interleaved data */
	size_t				boundary;
	size_t				dst_hole;
	size_t				src_hole;

	/* Memset temporary buffer */
	bool				memset_buffer;
	dma_addr_t			memset_paddr;
	int				*memset_vaddr;
};
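
/*
 * Chaining sketch (illustrative; "prev" and "next" are hypothetical
 * struct at_desc pointers): hardware descriptors are linked by storing the
 * DMA address of the next LLI in the previous one's dscr field, e.g.
 *
 *	prev->lli.dscr = next->txd.phys;
 *
 * A dscr of zero (see set_desc_eol() below) marks the end of the chain.
 */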

static inline struct at_desc *
txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_desc, txd);
}


/*--  Channels  --------------------------------------------------------*/

/**
 * enum atc_status - information bits stored in channel status flag
 *
 * Manipulated with atomic operations.
 */
enum atc_status {
	ATC_IS_ERROR = 0,
	ATC_IS_PAUSED = 1,
	ATC_IS_CYCLIC = 24,
};

/**
 * struct at_dma_chan - internal representation of an Atmel HDMAC channel
 * @chan_common: common dmaengine channel object members
 * @device: parent device
 * @ch_regs: memory mapped register base
 * @mask: channel index in a mask
 * @per_if: peripheral interface
 * @mem_if: memory interface
 * @status: transmit status information from irq/prep* functions
 *          to tasklet (use atomic operations)
 * @tasklet: bottom half to finish transaction work
 * @save_cfg: configuration register that is saved on suspend/resume cycle
 * @save_dscr: for cyclic operations, preserve next descriptor address in
 *             the cyclic list on suspend/resume cycle
 * @dma_sconfig: configuration for slave transfers, passed via
 *               .device_config
 * @lock: serializes enqueue/dequeue operations to descriptors lists
 * @active_list: list of descriptors the dmaengine is running on
 * @queue: list of descriptors ready to be submitted to engine
 * @free_list: list of descriptors usable by the channel
 */
struct at_dma_chan {
	struct dma_chan		chan_common;
	struct at_dma		*device;
	void __iomem		*ch_regs;
	u8			mask;
	u8			per_if;
	u8			mem_if;
	unsigned long		status;
	struct tasklet_struct	tasklet;
	u32			save_cfg;
	u32			save_dscr;
	struct dma_slave_config dma_sconfig;

	spinlock_t		lock;

	/* these other elements are all protected by lock */
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
};

#define	channel_readl(atchan, name) \
	__raw_readl((atchan)->ch_regs + ATC_##name##_OFFSET)

#define	channel_writel(atchan, name, val) \
	__raw_writel((val), (atchan)->ch_regs + ATC_##name##_OFFSET)
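
/*
 * Usage sketch (illustrative; "atchan" and "desc" are hypothetical locals):
 * the register name is token-pasted with the _OFFSET suffix, so
 *
 *	channel_writel(atchan, SADDR, desc->lli.saddr);
 *
 * expands to __raw_writel(desc->lli.saddr, atchan->ch_regs + ATC_SADDR_OFFSET).
 */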

static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_dma_chan, chan_common);
}

/*
 * Fix sconfig's burst size according to at_hdmac. We need to convert the
 * value as follows:
 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7.
 *
 * This can be done by finding the most significant bit set.
 */
static inline void convert_burst(u32 *maxburst)
{
	if (*maxburst > 1)
		*maxburst = fls(*maxburst) - 2;
	else
		*maxburst = 0;
}
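
/*
 * Worked example: for *maxburst == 16, fls(16) == 5, so the stored value is
 * 5 - 2 = 3, which matches the ATC_SCSIZE_16 / ATC_DCSIZE_16 encoding (0x3).
 * A maxburst of 1 (or 0) maps to 0, the single-transfer encoding.
 */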

/*
 * Fix sconfig's bus width according to at_hdmac.
 * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
 */
static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
{
	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return 1;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return 2;
	default:
		/* For 1 byte width or fallback */
		return 0;
	}
}
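
/*
 * The return values line up with the WIDTH field encodings above, so a
 * slave configuration's bus width can be turned directly into a CTRLA
 * field, e.g. ATC_SRC_WIDTH(convert_buswidth(sconfig->src_addr_width))
 * (illustrative; "sconfig" is a hypothetical struct dma_slave_config).
 */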

/*--  Controller  ------------------------------------------------------*/

/**
 * struct at_dma - internal representation of an Atmel HDMA Controller
 * @dma_common: common dmaengine dma_device object members
 * @regs: memory mapped register base
 * @clk: dma controller clock
 * @save_imr: interrupt mask register that is saved on suspend/resume cycle
 * @all_chan_mask: all channels available in a mask
 * @dma_desc_pool: base of DMA descriptor region (DMA address)
 * @memset_pool: DMA pool for the temporary buffers used by memset operations
 * @chan: channels table to store at_dma_chan structures
 */
struct at_dma {
	struct dma_device	dma_common;
	void __iomem		*regs;
	struct clk		*clk;
	u32			save_imr;

	u8			all_chan_mask;

	struct dma_pool		*dma_desc_pool;
	struct dma_pool		*memset_pool;
	/* AT THE END channels table */
	struct at_dma_chan	chan[];
};

#define	dma_readl(atdma, name) \
	__raw_readl((atdma)->regs + AT_DMA_##name)
#define	dma_writel(atdma, name, val) \
	__raw_writel((val), (atdma)->regs + AT_DMA_##name)

static inline struct at_dma *to_at_dma(struct dma_device *ddev)
{
	return container_of(ddev, struct at_dma, dma_common);
}


/*--  Helper functions  ------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

#if defined(VERBOSE_DEBUG)
static void vdbg_dump_regs(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	dev_err(chan2dev(&atchan->chan_common),
		"  channel %d : imr = 0x%x, chsr = 0x%x\n",
		atchan->chan_common.chan_id,
		dma_readl(atdma, EBCIMR),
		dma_readl(atdma, CHSR));

	dev_err(chan2dev(&atchan->chan_common),
		"  channel: s0x%x d0x%x ctrl0x%x:0x%x cfg0x%x l0x%x\n",
		channel_readl(atchan, SADDR),
		channel_readl(atchan, DADDR),
		channel_readl(atchan, CTRLA),
		channel_readl(atchan, CTRLB),
		channel_readl(atchan, CFG),
		channel_readl(atchan, DSCR));
}
#else
static void vdbg_dump_regs(struct at_dma_chan *atchan) {}
#endif

static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
{
	dev_crit(chan2dev(&atchan->chan_common),
		 "desc: s%pad d%pad ctrl0x%x:0x%x l%pad\n",
		 &lli->saddr, &lli->daddr,
		 lli->ctrla, lli->ctrlb, &lli->dscr);
}


static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
{
	u32 ebci;

	/* enable interrupts on buffer transfer completion & error */
	ebci =    AT_DMA_BTC(chan_id)
		| AT_DMA_ERR(chan_id);
	if (on)
		dma_writel(atdma, EBCIER, ebci);
	else
		dma_writel(atdma, EBCIDR, ebci);
}

static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
{
	atc_setup_irq(atdma, chan_id, 1);
}

static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
{
	atc_setup_irq(atdma, chan_id, 0);
}


/**
 * atc_chan_is_enabled - test if given channel is enabled
 * @atchan: channel we want to test status
 */
static inline int atc_chan_is_enabled(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	return !!(dma_readl(atdma, CHSR) & atchan->mask);
}

/**
 * atc_chan_is_paused - test channel pause/resume status
 * @atchan: channel we want to test status
 */
static inline int atc_chan_is_paused(struct at_dma_chan *atchan)
{
	return test_bit(ATC_IS_PAUSED, &atchan->status);
}

/**
 * atc_chan_is_cyclic - test if given channel has cyclic property set
 * @atchan: channel we want to test status
 */
static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan)
{
	return test_bit(ATC_IS_CYCLIC, &atchan->status);
}

/**
 * set_desc_eol - set end-of-link to descriptor so it will end transfer
 * @desc: descriptor, single or at the end of a chain, to end chain on
 */
static void set_desc_eol(struct at_desc *desc)
{
	u32 ctrlb = desc->lli.ctrlb;

	ctrlb &= ~ATC_IEN;
	ctrlb |= ATC_SRC_DSCR_DIS | ATC_DST_DSCR_DIS;

	desc->lli.ctrlb = ctrlb;
	desc->lli.dscr = 0;
}

#endif /* AT_HDMAC_REGS_H */