Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

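File: drivers/dma/at_xdmac.c (Atmel XDMAC DMA controller driver; imported in commit 8f3ce5b39 by kx, 2023-10-28)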
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
#define		AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)		/* Number of Channels Minus One */
#define		AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)		/* Number of Bytes */
#define		AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
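/*
 * Worked example (hypothetical register value): if GTYPE reads 0x0032400f,
 * AT_XDMAC_NB_CH() gives (0x0f + 1) = 16 channels, AT_XDMAC_FIFO_SZ() gives
 * ((0x0032400f >> 5) & 0x7ff) = 512 bytes of FIFO, and AT_XDMAC_NB_REQ()
 * gives ((0x32 & 0x3f) + 1) = 51 peripheral request lines.
 */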
#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
#define AT_XDMAC_GRS		0x28	/* Global Channel Read Suspend Register */
#define AT_XDMAC_GWS		0x2C	/* Global Write Suspend Register */
#define AT_XDMAC_GRWS		0x30	/* Global Channel Read Write Suspend Register */
#define AT_XDMAC_GRWR		0x34	/* Global Channel Read Write Resume Register */
#define AT_XDMAC_GSWR		0x38	/* Global Channel Software Request Register */
#define AT_XDMAC_GSWS		0x3C	/* Global Channel Software Request Status Register */
#define AT_XDMAC_GSWF		0x40	/* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
#define		AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
#define		AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
#define		AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
#define		AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
#define		AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
#define		AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
#define		AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
#define		AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
#define		AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
#define		AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
#define		AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
#define		AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
#define		AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
#define		AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
#define		AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
#define		AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
#define		AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
#define		AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
#define		AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
#define		AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
#define		AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
#define		AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
#define		AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
#define		AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
#define		AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
#define		AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
#define		AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
#define		AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
#define		AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)			/* Channel x Next Descriptor Interface */
#define		AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)		/* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
#define		AT_XDMAC_CNDC_NDE		(0x1 << 0)		/* Channel x Next Descriptor Enable */
#define		AT_XDMAC_CNDC_NDSUP		(0x1 << 1)		/* Channel x Next Descriptor Source Update */
#define		AT_XDMAC_CNDC_NDDUP		(0x1 << 2)		/* Channel x Next Descriptor Destination Update */
#define		AT_XDMAC_CNDC_NDVIEW_MASK	GENMASK(28, 27)
#define		AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)		/* Channel x Next Descriptor View 0 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)		/* Channel x Next Descriptor View 1 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)		/* Channel x Next Descriptor View 2 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)		/* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
#define		AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
#define			AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
#define			AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
#define		AT_XDMAC_CC_MBSIZE_MASK	(0x3 << 1)
#define			AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
#define			AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
#define			AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
#define			AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
#define		AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
#define			AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
#define			AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
#define		AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
#define			AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
#define			AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
#define		AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
#define			AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
#define			AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
#define		AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
#define			AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
#define			AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
#define		AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
#define		AT_XDMAC_CC_DWIDTH_OFFSET	11
#define		AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define		AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
#define			AT_XDMAC_CC_DWIDTH_BYTE		0x0
#define			AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
#define			AT_XDMAC_CC_DWIDTH_WORD		0x2
#define			AT_XDMAC_CC_DWIDTH_DWORD	0x3
#define		AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
#define		AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
#define		AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
#define			AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
#define			AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
#define			AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
#define			AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
#define		AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
#define			AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
#define			AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
#define			AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
#define			AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
#define		AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
#define			AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
#define			AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
#define		AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
#define			AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
#define			AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
#define		AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
#define			AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
#define			AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
#define		AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */

#define AT_XDMAC_CHAN_REG_BASE	0x50	/* Channel registers base address */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN	0x20
#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES	5

#define AT_XDMAC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan			chan;
	void __iomem			*ch_regs;
	u32				mask;		/* Channel Mask */
	u32				cfg;		/* Channel Configuration Register */
	u8				perid;		/* Peripheral ID */
	u8				perif;		/* Peripheral Interface */
	u8				memif;		/* Memory Interface */
	u32				save_cc;
	u32				save_cim;
	u32				save_cnda;
	u32				save_cndc;
	u32				irq_status;
	unsigned long			status;
	struct tasklet_struct		tasklet;
	struct dma_slave_config		sconfig;

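	/*
	 * Protects xfers_list, free_descs_list and sconfig against
	 * concurrent access (see the spin_lock_irqsave() callers below).
	 */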
	spinlock_t			lock;

	struct list_head		xfers_list;
	struct list_head		free_descs_list;
};


/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device	dma;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	u32			save_gim;
	struct dma_pool		*at_xdmac_desc_pool;
	struct at_xdmac_chan	chan[];
};


/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	u32 mbr_nda;	/* Next Descriptor Member */
	u32 mbr_ubc;	/* Microblock Control Member */
	u32 mbr_sa;	/* Source Address Member */
	u32 mbr_da;	/* Destination Address Member */
	u32 mbr_cfg;	/* Configuration Register */
	u32 mbr_bc;	/* Block Control Register */
	u32 mbr_ds;	/* Data Stride Register */
	u32 mbr_sus;	/* Source Microblock Stride Register */
	u32 mbr_dus;	/* Destination Microblock Stride Register */
};
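/*
 * This layout matches the controller's largest linked-list descriptor view
 * (view 3); smaller views only use the leading members, which is why
 * at_xdmac_start_xfer() below programs CNDC with the NDVIEW matching each
 * descriptor.
 */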

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
	struct at_xdmac_lld		lld;
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;
	unsigned int			xfer_size;
	struct list_head		descs_list;
	struct list_head		xfer_node;
} __aligned(sizeof(u64));

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
}
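/*
 * Example of the layout implied above: with a 0x40-byte stride starting at
 * 0x50, channel 2's configuration register (AT_XDMAC_CC, offset 0x28) sits
 * at 0x50 + 2 * 0x40 + 0x28 = 0xf8 from the controller base.
 */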

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline int at_xdmac_csize(u32 maxburst)
{
	int csize;

	csize = ffs(maxburst) - 1;
	if (csize > 4)
		csize = -EINVAL;

	return csize;
}
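/*
 * The CSIZE field encodes a chunk of 2^csize data items, so maxburst values
 * of 1, 2, 4, 8 and 16 map to csize 0..4; larger bursts (csize > 4) are
 * rejected. Note that ffs() returns the lowest set bit, so a
 * non-power-of-two maxburst is effectively reduced to its lowest set bit.
 */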

static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
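/*
 * Tunable at module load time, e.g. (hypothetical invocations):
 *   modprobe at_xdmac init_nr_desc_per_channel=128
 * or, for a built-in driver, on the kernel command line:
 *   at_xdmac.init_nr_desc_per_channel=128
 */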


static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}

/* Call with lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac	*atxdmac = to_at_xdmac(atchan->chan.device);
	u32		reg;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	/* Mark the transfer as active so we do not try to start it again. */
	first->active_xfer = true;

	/* Tell the xdmac where to fetch the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
	      | AT_XDMAC_CNDA_NDAIF(atchan->memif);
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing a non-cyclic transfer we need to use next descriptor
	 * view 2 (or higher) since some fields of the configuration register
	 * depend on the transfer size and the src/dst addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
	else if ((first->lld.mbr_ubc &
		  AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	else
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
	/*
	 * Even if the register will be updated from the configuration in the
	 * descriptor when using view 2 or higher, the PROT bit won't be set
	 * properly. This bit can be modified only by using the channel
	 * configuration register.
	 */
	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
	/*
	 * Request Overflow Error is only for peripheral synchronized transfers
	 */
	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
		reg |= AT_XDMAC_CIE_ROIE;

	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		irqflags;

	spin_lock_irqsave(&atchan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	spin_unlock_irqrestore(&atchan->lock, irqflags);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);

	return cookie;
}
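/*
 * Note: tx_submit only assigns a cookie and queues the descriptor; the
 * hardware is started later, from the device_issue_pending path (not
 * shown in this excerpt), which ends up calling at_xdmac_start_xfer().
 */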

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc	*desc;
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	dma_addr_t		phys;

	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
	memset(&desc->lld, 0, sizeof(desc->lld));
	INIT_LIST_HEAD(&desc->descs_list);
	desc->direction = DMA_TRANS_NONE;
	desc->xfer_size = 0;
	desc->active_xfer = false;
}

/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		at_xdmac_init_used_desc(desc);
	}

	return desc;
}

static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan),	"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;

	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac		*atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan;
	struct device		*dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		 atchan->memif, atchan->perif, atchan->perid);

	return chan;
}
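/*
 * The single DT cell packs memif/perif/perid. A hypothetical consumer node,
 * using the macros from dt-bindings/dma/at91.h, could look like:
 *
 *   dmas = <&xdmac0 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
 *		      | AT91_XDMAC_DT_PERID(11))>;
 */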

static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
				      enum dma_transfer_direction direction)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	int			csize, dwidth;

	if (direction == DMA_DEV_TO_MEM) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_INCREMENTED_AM
			| AT_XDMAC_CC_SAM_FIXED_AM
			| AT_XDMAC_CC_DIF(atchan->memif)
			| AT_XDMAC_CC_SIF(atchan->perif)
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_PER2MEM
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		csize = ffs(atchan->sconfig.src_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid src addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	} else if (direction == DMA_MEM_TO_DEV) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_FIXED_AM
			| AT_XDMAC_CC_SAM_INCREMENTED_AM
			| AT_XDMAC_CC_DIF(atchan->perif)
			| AT_XDMAC_CC_SIF(atchan->memif)
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_MEM2PER
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid dst maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid dst addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	}

	dev_dbg(chan2dev(chan),	"%s: cfg=0x%08x\n", __func__, atchan->cfg);

	return 0;
}
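/*
 * Worked example (hypothetical slave config): for DMA_DEV_TO_MEM with
 * src_maxburst = 8 and src_addr_width = 4 bytes, csize = ffs(8) - 1 = 3
 * (chunks of 8 data) and dwidth = ffs(4) - 1 = 2 (32-bit data), so the
 * resulting cfg carries AT_XDMAC_CC_CSIZE(3) | AT_XDMAC_CC_DWIDTH(2) on
 * top of the direction bits set above.
 */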

/*
 * Only check that the maxburst and addr width values are supported by
 * the controller, not that the configuration is valid for the transfer,
 * since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	return 0;
}
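/*
 * Units reminder: maxburst is expressed in data items (at most
 * AT_XDMAC_MAX_CSIZE = 16), while addr_width is in bytes (at most
 * AT_XDMAC_MAX_DWIDTH = 8, i.e. 64 bits).
 */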

static int at_xdmac_set_slave_config(struct dma_chan *chan,
				      struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

	if (at_xdmac_check_slave_config(sconfig)) {
		dev_err(chan2dev(chan), "invalid slave configuration\n");
		return -EINVAL;
	}

	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

	return 0;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan		*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc		*first = NULL, *prev = NULL;
	struct scatterlist		*sg;
	int				i;
	unsigned int			xfer_size = 0;
	unsigned long			irqflags;
	struct dma_async_tx_descriptor	*ret = NULL;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		 __func__, sg_len,
		 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		 flags);

	/*
	 * Protect the sconfig field, which can be modified by
	 * at_xdmac_set_slave_config().
	 */
	spin_lock_irqsave(&atchan->lock, irqflags);

	if (at_xdmac_compute_chan_conf(chan, direction))
		goto spin_unlock;

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc	*desc = NULL;
		u32			len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			goto spin_unlock;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			 __func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			goto spin_unlock;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = mem;
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		dwidth = at_xdmac_get_dwidth(atchan->cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? dwidth
			       : AT_XDMAC_CC_DWIDTH_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2			/* next descriptor view */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 			| AT_XDMAC_MBR_UBC_NDEN					/* next descriptor dst parameter update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 			| AT_XDMAC_MBR_UBC_NSEN					/* next descriptor src parameter update */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 			| (len >> fixed_dwidth);				/* microblock length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 		dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 		/* Chain lld. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 		if (prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 			at_xdmac_queue_desc(chan, prev, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		prev = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 			first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 			 __func__, desc, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 		list_add_tail(&desc->desc_node, &first->descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		xfer_size += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	first->tx_dma_desc.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	first->xfer_size = xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	first->direction = direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	ret = &first->tx_dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) spin_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	spin_unlock_irqrestore(&atchan->lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) }
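
/*
 * A minimal, illustrative client sketch (assumed code, kept out of the
 * build): one way the slave-sg path above is reached through the generic
 * dmaengine API. The FIFO address, bus width, burst size and flags are
 * assumptions, not values taken from this driver.
 */
#if 0
static int example_tx_to_device(struct dma_chan *chan, struct scatterlist *sgl,
				unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 16,
	};
	struct dma_async_tx_descriptor *tx;
	int ret;

	/* Stored in atchan->sconfig, consumed by at_xdmac_compute_chan_conf(). */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* Ends up in at_xdmac_prep_slave_sg() via the device_prep_slave_sg hook. */
	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
#endif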
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			 size_t buf_len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			 enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 			 unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	unsigned int		periods = buf_len / period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	unsigned long		irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		__func__, &buf_addr, buf_len, period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	if (!is_slave_direction(direction)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 		dev_err(chan2dev(chan), "invalid DMA direction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		dev_err(chan2dev(chan), "channel currently used\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	if (at_xdmac_compute_chan_conf(chan, direction))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	for (i = 0; i < periods; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 		struct at_xdmac_desc	*desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 		spin_lock_irqsave(&atchan->lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		desc = at_xdmac_get_desc(atchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 		if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			dev_err(chan2dev(chan), "can't get descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 			if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 			spin_unlock_irqrestore(&atchan->lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 		spin_unlock_irqrestore(&atchan->lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 		dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 			__func__, desc, &desc->tx_dma_desc.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		if (direction == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 			desc->lld.mbr_sa = atchan->sconfig.src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 			desc->lld.mbr_da = buf_addr + i * period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 			desc->lld.mbr_sa = buf_addr + i * period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 			desc->lld.mbr_da = atchan->sconfig.dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		desc->lld.mbr_cfg = atchan->cfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			| AT_XDMAC_MBR_UBC_NDEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			| AT_XDMAC_MBR_UBC_NSEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		/* Chain lld. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		if (prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			at_xdmac_queue_desc(chan, prev, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		prev = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 			first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 			 __func__, desc, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		list_add_tail(&desc->desc_node, &first->descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	at_xdmac_queue_desc(chan, prev, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	first->tx_dma_desc.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	first->xfer_size = buf_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	first->direction = direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	return &first->tx_dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) }
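
/*
 * An illustrative sketch (assumed client code, kept out of the build): a
 * cyclic request, e.g. for audio, reaches the function above through
 * dmaengine_prep_dma_cyclic(). Note that the code above closes the ring by
 * queueing `first` after the last descriptor, so the transfer wraps with no
 * CPU intervention. The four-period split is an assumption.
 */
#if 0
static struct dma_async_tx_descriptor *
example_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf, size_t buf_len)
{
	/* Four periods; a callback can run at each period boundary. */
	return dmaengine_prep_dma_cyclic(chan, buf, buf_len, buf_len / 4,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
}
#endif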
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	u32 width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	 * Check address alignment to select the greatest data width we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	 * can use.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	 * Some XDMAC implementations don't provide dword transfers; in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	 * that case selecting dword behaves the same as selecting word
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	 * transfers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 	if (!(addr & 7)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 		width = AT_XDMAC_CC_DWIDTH_DWORD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	} else if (!(addr & 3)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 		width = AT_XDMAC_CC_DWIDTH_WORD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	} else if (!(addr & 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 		width = AT_XDMAC_CC_DWIDTH_BYTE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	return width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) }
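
/*
 * Worked example for the selection above (illustrative, not part of the
 * original source). Callers typically pass an OR of the addresses (and
 * sometimes the length) involved, so the result is the widest access that
 * every operand can sustain:
 *
 *   at_xdmac_align_width(chan, 0x1000 | 0x2008) -> double word (both 8-byte aligned)
 *   at_xdmac_align_width(chan, 0x1000 | 0x2004) -> word        (0x2004 & 7 != 0)
 *   at_xdmac_align_width(chan, 0x1001)          -> byte        (odd address)
 */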
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) static struct at_xdmac_desc *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 				struct at_xdmac_chan *atchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 				struct at_xdmac_desc *prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 				dma_addr_t src, dma_addr_t dst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 				struct dma_interleaved_template *xt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 				struct data_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	struct at_xdmac_desc	*desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	u32			dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	size_t			ublen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	 * WARNING: The channel configuration is set here since there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 	 * dmaengine_slave_config call in this case. Moreover, we don't know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	 * the direction, which means we can't dynamically set the source and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	 * destination interfaces, so we have to use the same one for both.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	 * Only interface 0 allows EBI access. Fortunately we can access DDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	 * through both ports (at least on SAMA5D4x), so using the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 	 * interface for source and destination works around the unknown direction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	 * ERRATA: Even though it is useless for memory transfers, the PERID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	 * must not match that of another channel, otherwise it could lead to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	 * spurious flag status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 					| AT_XDMAC_CC_DIF(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 					| AT_XDMAC_CC_SIF(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			"%s: chunk too big (%zu, max size %lu)...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			__func__, chunk->size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	if (prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			"Adding items at the end of desc 0x%p\n", prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	if (xt->src_inc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		if (xt->src_sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 			chan_cc |=  AT_XDMAC_CC_SAM_UBS_AM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			chan_cc |=  AT_XDMAC_CC_SAM_INCREMENTED_AM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	if (xt->dst_inc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 		if (xt->dst_sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 			chan_cc |=  AT_XDMAC_CC_DAM_UBS_AM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			chan_cc |=  AT_XDMAC_CC_DAM_INCREMENTED_AM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	spin_lock_irqsave(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	desc = at_xdmac_get_desc(atchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 		dev_err(chan2dev(chan), "can't get descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	ublen = chunk->size >> dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	desc->lld.mbr_sa = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	desc->lld.mbr_da = dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		| AT_XDMAC_MBR_UBC_NDEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		| AT_XDMAC_MBR_UBC_NSEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 		| ublen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	desc->lld.mbr_cfg = chan_cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		desc->lld.mbr_ubc, desc->lld.mbr_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	/* Chain lld. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		at_xdmac_queue_desc(chan, prev, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) }
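
/*
 * Illustrative arithmetic for the descriptor built above (not part of the
 * original source): for a 4096-byte chunk between word-aligned addresses,
 * dwidth is AT_XDMAC_CC_DWIDTH_WORD (2), so ublen = 4096 >> 2 = 1024
 * transfers per microblock, while mbr_sus/mbr_dus carry the inter-chunk
 * gaps returned by dmaengine_get_src_icg()/dmaengine_get_dst_icg().
 */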
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) at_xdmac_prep_interleaved(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 			  struct dma_interleaved_template *xt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 			  unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 	struct at_xdmac_desc	*prev = NULL, *first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	dma_addr_t		dst_addr, src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	size_t			src_skip = 0, dst_skip = 0, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	struct data_chunk	*chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	 * TODO: Handle the case where we have to repeat a chain of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	 * descriptors...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	if ((xt->numf > 1) && (xt->frame_size > 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		__func__, &xt->src_start, &xt->dst_start, xt->numf,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		xt->frame_size, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	src_addr = xt->src_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	dst_addr = xt->dst_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	if (xt->numf > 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		first = at_xdmac_interleaved_queue_desc(chan, atchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 							NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 							src_addr, dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 							xt, xt->sgl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		/* Length of the block is (BLEN+1) microblocks. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		for (i = 0; i < xt->numf - 1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 			at_xdmac_increment_block_count(chan, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 			__func__, first, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		list_add_tail(&first->desc_node, &first->descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		for (i = 0; i < xt->frame_size; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			size_t src_icg = 0, dst_icg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 			struct at_xdmac_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 			chunk = xt->sgl + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 			dst_icg = dmaengine_get_dst_icg(xt, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 			src_icg = dmaengine_get_src_icg(xt, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			src_skip = chunk->size + src_icg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 			dst_skip = chunk->size + dst_icg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 				__func__, chunk->size, src_icg, dst_icg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 							       prev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 							       src_addr, dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 							       xt, chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 				if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 					list_splice_init(&first->descs_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 							 &atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 				return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 				first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 				__func__, desc, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 			list_add_tail(&desc->desc_node, &first->descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 			if (xt->src_sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 				src_addr += src_skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 			if (xt->dst_sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 				dst_addr += dst_skip;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			len += chunk->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			prev = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	first->tx_dma_desc.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	first->tx_dma_desc.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 	first->xfer_size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	return &first->tx_dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) }
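
/*
 * An illustrative sketch (assumed client code, kept out of the build) of a
 * mem-to-mem interleaved request serviced by the function above: copy two
 * 512-byte chunks while skipping 512 bytes on the source side between them.
 * The sizes and the GFP_KERNEL context are assumptions.
 */
#if 0
static struct dma_async_tx_descriptor *
example_copy_with_gap(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_interleaved_template *xt;

	xt = kzalloc(struct_size(xt, sgl, 2), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_inc = true;
	xt->dst_inc = true;
	xt->src_sgl = true;		/* honour the gap on the source side */
	xt->numf = 1;			/* one frame... */
	xt->frame_size = 2;		/* ...of two chunks */
	xt->sgl[0].size = 512;		/* bytes copied per chunk */
	xt->sgl[0].icg = 512;		/* bytes skipped before the next chunk */
	xt->sgl[1].size = 512;
	xt->sgl[1].icg = 0;

	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);
	return tx;
}
#endif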
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			 size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	struct at_xdmac_desc	*first = NULL, *prev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	size_t			remaining_size = len, xfer_size = 0, ublen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	dma_addr_t		src_addr = src, dst_addr = dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	u32			dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * WARNING: We don't know the direction, which means we can't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	 * dynamically set the source and destination interfaces, so we have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	 * to use the same one for both. Only interface 0 allows EBI access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 	 * Fortunately we can access DDR through both ports (at least on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	 * SAMA5D4x), so using the same interface for source and destination
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 	 * works around the unknown direction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	 * ERRATA: Even though it is useless for memory transfers, the PERID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	 * must not match that of another channel, otherwise it could lead to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	 * spurious flag status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 					| AT_XDMAC_CC_DAM_INCREMENTED_AM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 					| AT_XDMAC_CC_SAM_INCREMENTED_AM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 					| AT_XDMAC_CC_DIF(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 					| AT_XDMAC_CC_SIF(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	unsigned long		irqflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		__func__, &src, &dest, len, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	if (unlikely(!len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	/* Prepare descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	while (remaining_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		struct at_xdmac_desc	*desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 		spin_lock_irqsave(&atchan->lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		desc = at_xdmac_get_desc(atchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		spin_unlock_irqrestore(&atchan->lock, irqflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 			dev_err(chan2dev(chan), "can't get descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 				list_splice_init(&first->descs_list, &atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		/* Update src and dest addresses. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 		src_addr += xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		dst_addr += xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			xfer_size = remaining_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 		/* Check remaining length and change data width if needed. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		dwidth = at_xdmac_align_width(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 					      src_addr | dst_addr | xfer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 		ublen = xfer_size >> dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 		remaining_size -= xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		desc->lld.mbr_sa = src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 		desc->lld.mbr_da = dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 			| AT_XDMAC_MBR_UBC_NDEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 			| AT_XDMAC_MBR_UBC_NSEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 			| ublen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 		desc->lld.mbr_cfg = chan_cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		/* Chain lld. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		if (prev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			at_xdmac_queue_desc(chan, prev, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		prev = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			 __func__, desc, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		list_add_tail(&desc->desc_node, &first->descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	first->tx_dma_desc.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	first->xfer_size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	return &first->tx_dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) }
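
/*
 * Illustrative arithmetic for the splitting above (not part of the original
 * source): one linked-list descriptor carries at most
 * AT_XDMAC_MBR_UBC_UBLEN_MAX (0xFFFFFF) transfers of (1 << dwidth) bytes.
 * With dword-aligned buffers (dwidth = 3) that is almost 128 MiB, so most
 * copies fit in a single descriptor; with byte alignment (dwidth = 0) the
 * limit drops to 16 MiB - 1, so e.g. a 40 MiB copy is split by the loop
 * above across three chained descriptors.
 */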
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 							 struct at_xdmac_chan *atchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 							 dma_addr_t dst_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 							 size_t len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 							 int value)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	struct at_xdmac_desc	*desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 	size_t			ublen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	u32			dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	 * WARNING: The channel configuration is set here since there is no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	 * dmaengine_slave_config call in this case. Moreover, we don't know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	 * the direction, which means we can't dynamically set the source and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	 * destination interfaces, so we have to use the same one for both.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	 * Only interface 0 allows EBI access. Fortunately we can access DDR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	 * through both ports (at least on SAMA5D4x), so using the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	 * interface for source and destination works around the unknown direction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	 * ERRATA: Even though it is useless for memory transfers, the PERID
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	 * must not match that of another channel, otherwise it could lead to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	 * spurious flag status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 					| AT_XDMAC_CC_DAM_UBS_AM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 					| AT_XDMAC_CC_SAM_INCREMENTED_AM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 					| AT_XDMAC_CC_DIF(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 					| AT_XDMAC_CC_SIF(0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 					| AT_XDMAC_CC_MBSIZE_SIXTEEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 					| AT_XDMAC_CC_MEMSET_HW_MODE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 					| AT_XDMAC_CC_TYPE_MEM_TRAN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	dwidth = at_xdmac_align_width(chan, dst_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		dev_err(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 			"%s: Transfer too large, aborting...\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	spin_lock_irqsave(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	desc = at_xdmac_get_desc(atchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 		dev_err(chan2dev(chan), "can't get descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	ublen = len >> dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	desc->lld.mbr_da = dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	desc->lld.mbr_ds = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 		| AT_XDMAC_MBR_UBC_NDEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 		| AT_XDMAC_MBR_UBC_NSEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 		| ublen;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	desc->lld.mbr_cfg = chan_cc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		desc->lld.mbr_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	return desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 			 size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	struct at_xdmac_desc	*desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		__func__, &dest, len, value, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	if (unlikely(!len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	if (!desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	list_add_tail(&desc->desc_node, &desc->descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	desc->tx_dma_desc.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	desc->tx_dma_desc.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	desc->xfer_size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	return &desc->tx_dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) }
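
/*
 * A minimal, illustrative client sketch (assumed code, kept out of the
 * build): the memset path above is reached through the generic helper
 * below. The pattern is replicated by the controller itself, as selected
 * by AT_XDMAC_CC_MEMSET_HW_MODE in the channel configuration.
 */
#if 0
static int example_clear_buffer(struct dma_chan *chan, dma_addr_t buf,
				size_t len)
{
	struct dma_async_tx_descriptor *tx;

	/* Ends up in at_xdmac_prep_dma_memset() via device_prep_dma_memset. */
	tx = dmaengine_prep_dma_memset(chan, buf, 0, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}
#endif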
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 			    unsigned int sg_len, int value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 			    unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	struct at_xdmac_desc	*desc, *pdesc = NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 				*ppdesc = NULL, *first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	size_t			stride = 0, pstride = 0, len = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	if (!sgl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		__func__, sg_len, value, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	/* Prepare descriptors. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			__func__, &sg_dma_address(sg), sg_dma_len(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 			value, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 		desc = at_xdmac_memset_create_desc(chan, atchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 						   sg_dma_address(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 						   sg_dma_len(sg),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 						   value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			if (first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 				list_splice_init(&first->descs_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 						 &atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 			first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		/* Update our strides */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		pstride = stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		if (psg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 			stride = sg_dma_address(sg) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 				(sg_dma_address(psg) + sg_dma_len(psg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		 * The scatterlist API gives us only the address and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		 * length of each element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		 * Unfortunately, we don't have the stride, which we
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		 * will need to compute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		 * That makes us end up in a situation like this one:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		 *    len    stride    len    stride    len
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		 * +-------+        +-------+        +-------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		 * |  N-2  |        |  N-1  |        |   N   |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 		 * +-------+        +-------+        +-------+
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		 * We need all three elements (N-2, N-1 and N) to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		 * decide whether to queue N-1 or reuse N-2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 		 * We will only consider N if it is the last element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 		if (ppdesc && pdesc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 			if ((stride == pstride) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 				dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 					"%s: desc 0x%p can be merged with desc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 					__func__, pdesc, ppdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 				 * Increment the block count of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 				 * N-2 descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 				at_xdmac_increment_block_count(chan, ppdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 				ppdesc->lld.mbr_dus = stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 				 * Put back the N-1 descriptor in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 				 * free descriptor list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 				list_add_tail(&pdesc->desc_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 					      &atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 				 * Make our N-1 descriptor pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 				 * point to the N-2 since they were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 				 * actually merged.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 				pdesc = ppdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 			 * Rule out the case where we don't have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 			 * pstride computed yet (our second sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 			 * element)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 			 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 			 * We also want to catch the case where there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 			 * would be a negative stride,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 			} else if (pstride ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 				   sg_dma_address(sg) < sg_dma_address(psg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 				 * Queue the N-1 descriptor after the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 				 * N-2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 				at_xdmac_queue_desc(chan, ppdesc, pdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 				 * Add the N-1 descriptor to the list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 				 * of the descriptors used for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 				 * transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 				list_add_tail(&desc->desc_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 					      &first->descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 				dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 					"%s: add desc 0x%p to descs_list 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 					__func__, desc, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		 * If we are the last element, just see if we have the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		 * same size as the previous element.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		 * If so, we can merge it with the previous descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		 * since we don't care about the stride anymore.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		if ((i == (sg_len - 1)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		    sg_dma_len(psg) == sg_dma_len(sg)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 			dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 				"%s: desc 0x%p can be merged with desc 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 				__func__, desc, pdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 			 * Increment the block count of the N-1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 			 * descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			at_xdmac_increment_block_count(chan, pdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			pdesc->lld.mbr_dus = stride;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 			 * Put back the N descriptor in the free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 			 * descriptor list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 			list_add_tail(&desc->desc_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 				      &atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 		/* Update our descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		ppdesc = pdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		pdesc = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		/* Update our scatter pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		ppsg = psg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		psg = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 		len += sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	first->tx_dma_desc.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	first->tx_dma_desc.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	first->xfer_size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	return &first->tx_dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
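
/*
 * Worked example for the merge logic above (illustrative, not part of the
 * original source): given three segments of equal length L at addresses A,
 * A + L + S and A + 2 * (L + S), i.e. a constant stride S, the loop emits a
 * single descriptor whose block count is incremented twice and whose
 * mbr_dus holds S, instead of three chained descriptors.
 */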
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static enum dma_status
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	struct at_xdmac_desc	*desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	struct list_head	*descs_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	enum dma_status		ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 	int			residue, retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	u32			cur_nda, check_nda, cur_ubc, mask, value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	u8			dwidth = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 	bool			initd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 	ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (!txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	spin_lock_irqsave(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	 * If the transfer has not been started yet, we don't need to compute
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	 * the residue; it is simply the transfer length.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (!desc->active_xfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		dma_set_residue(txstate, desc->xfer_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		goto spin_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	residue = desc->xfer_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	 * Flush FIFO: only relevant when the transfer is source-peripheral
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 	 * synchronized. The flush is needed before reading CUBC because data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 	 * in the FIFO are not reported by CUBC. Reporting a residue equal to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) 	 * the transfer length while data sit in the FIFO can cause issues.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	 * Use case: the Atmel USART has a timeout, i.e. characters were
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	 * received but no further character has arrived for a while. On
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	 * timeout, it requests the residue. If the data are still in the DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	 * FIFO, we would return a residue equal to the transfer length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	 * meaning no data were received. If an application is waiting for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	 * these data, it will hang, since we won't get another USART timeout
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	 * without receiving new data.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	if ((desc->lld.mbr_cfg & mask) == value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 			cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	 * The easiest way to compute the residue would be to pause the DMA,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 	 * but doing so can lead to data loss, as some devices don't have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 	 * FIFO.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	 * We need to read several registers because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 	 * - the DMA is running, so a descriptor change is possible while
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	 * reading these registers;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 	 * - when a block transfer is done, the CUBC register is reset to its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	 * initial value until the next descriptor is fetched. This stale
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 	 * value would corrupt the residue calculation, so we have to skip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 	 * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	 * INITD --------                    ------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 	 *              |____________________|
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	 *       _______________________  _______________
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	 * NDA       @desc2             \/   @desc3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	 *       _______________________/\_______________
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 	 *       __________  ___________  _______________
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	 *       __________/\___________/\_______________
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	 * Since descriptors are aligned on 64 bits, we can assume that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	 * the update of NDA and CUBC is atomic.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 	 * Memory barriers are used to ensure the read order of the registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	 * A maximum number of retries is set since, although unlikely, the loop might never end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		rmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		if ((check_nda == cur_nda) && initd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 		ret = DMA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		goto spin_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 	 * Flush the FIFO: only relevant when the transfer is source-peripheral
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 	 * synchronized. Another flush is needed here because CUBC is updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	 * when the controller issues the data write command, so it can report
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) 	 * data that have not yet been written to memory or to the device. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 	 * FIFO flush ensures the data have really been written.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) 	if ((desc->lld.mbr_cfg & mask) == value) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) 		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) 		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) 			cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) 	 * Subtract the size of all microblocks already transferred, including
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) 	 * the current one, then add back the remaining size of the current
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) 	 * microblock.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) 	descs_list = &desc->descs_list;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) 	list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) 		dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) 		residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) 		if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 	residue += cur_ubc << dwidth;
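	/*
	 * Worked example (editor's addition, hypothetical numbers): a 4096 B
	 * transfer split into two microblocks of 512 32-bit words (dwidth = 2,
	 * 2048 B each). If the first microblock is still in flight with
	 * cur_ubc = 128 words left, the loop above subtracts 512 << 2 and
	 * breaks, so residue = 4096 - 2048 + (128 << 2) = 2560 bytes.
	 */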
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 	dma_set_residue(txstate, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	dev_dbg(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 		 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 		 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) spin_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 	spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) }
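/*
 * Editor's addition: a minimal client-side sketch (guarded out, not part of
 * this driver) showing how the residue computed by at_xdmac_tx_status() is
 * consumed through the generic dmaengine API. The function name and the
 * xfer_len parameter are hypothetical.
 */
#if 0
static size_t my_client_bytes_done(struct dma_chan *chan, dma_cookie_t cookie,
				   size_t xfer_len)
{
	struct dma_tx_state state;
	enum dma_status status;

	/* Ends up calling at_xdmac_tx_status() for this controller. */
	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_ERROR)
		return 0;

	/* The residue is what remains, so done = total - residue. */
	return xfer_len - state.residue;
}
#endif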
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) /* The caller must hold the channel lock. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 				    struct at_xdmac_desc *desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) 	 * Remove the transfer from the transfer list, then move its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) 	 * descriptors into the free-descriptors list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) 	list_del(&desc->xfer_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) 	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) 	struct at_xdmac_desc	*desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 	 * If the channel is enabled, do nothing: advance_work will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) 	 * triggered from the interrupt handler.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) 	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 		desc = list_first_entry(&atchan->xfers_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 					struct at_xdmac_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 					xfer_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 		if (!desc->active_xfer)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 			at_xdmac_start_xfer(atchan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 	struct at_xdmac_desc		*desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	struct dma_async_tx_descriptor	*txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	spin_lock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 	if (list_empty(&atchan->xfers_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 		spin_unlock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 				xfer_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 	spin_unlock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	txd = &desc->tx_dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 	if (txd->flags & DMA_PREP_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 		dmaengine_desc_get_callback_invoke(txd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) }
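/*
 * Editor's addition: a hedged sketch (guarded out, not part of this driver)
 * of the client side served by at_xdmac_handle_cyclic(). A cyclic descriptor
 * is submitted once and the callback then fires on every period interrupt.
 * my_period_cb, buf, len and period are hypothetical.
 */
#if 0
static void my_period_cb(void *param);

static int my_start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
			      size_t len, size_t period)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, len, period,
					DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = my_period_cb;	/* invoked from the channel tasklet */
	txd->callback_param = NULL;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif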
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 	struct at_xdmac_desc	*bad_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 	 * The descriptor currently at the head of the active list is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 	 * broken. Since we don't have any way to report errors, we'll
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 	 * just have to scream loudly and try to continue with other
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 	 * descriptors queued (if any).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 		dev_err(chan2dev(&atchan->chan), "read bus error!!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) 		dev_err(chan2dev(&atchan->chan), "write bus error!!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 		dev_err(chan2dev(&atchan->chan), "request overflow error!!!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	spin_lock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	/* Channel must be disabled first as it's not done automatically */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	bad_desc = list_first_entry(&atchan->xfers_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 				    struct at_xdmac_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 				    xfer_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 	spin_unlock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	/* Print bad descriptor's details if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	dev_dbg(chan2dev(&atchan->chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 		bad_desc->lld.mbr_ubc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	/* Then continue with usual descriptor management */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) static void at_xdmac_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	struct at_xdmac_chan	*atchan = from_tasklet(atchan, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 	struct at_xdmac_desc	*desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	u32			error_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		__func__, atchan->irq_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 	error_mask = AT_XDMAC_CIS_RBEIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		     | AT_XDMAC_CIS_WBEIS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		     | AT_XDMAC_CIS_ROIS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 	if (at_xdmac_chan_is_cyclic(atchan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		at_xdmac_handle_cyclic(atchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 		   || (atchan->irq_status & error_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 		struct dma_async_tx_descriptor  *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) 		if (atchan->irq_status & error_mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 			at_xdmac_handle_error(atchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 		spin_lock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 		desc = list_first_entry(&atchan->xfers_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 					struct at_xdmac_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 					xfer_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 		if (!desc->active_xfer) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) 			spin_unlock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 		txd = &desc->tx_dma_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 		at_xdmac_remove_xfer(atchan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 		spin_unlock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 		dma_cookie_complete(txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 		if (txd->flags & DMA_PREP_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 			dmaengine_desc_get_callback_invoke(txd, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 		dma_run_dependencies(txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 		spin_lock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		at_xdmac_advance_work(atchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		spin_unlock_irq(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	struct at_xdmac		*atxdmac = (struct at_xdmac *)dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 	struct at_xdmac_chan	*atchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	u32			imr, status, pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 	u32			chan_imr, chan_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	int			i, ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 		pending = status & imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 		dev_vdbg(atxdmac->dma.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			 __func__, status, imr, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 		if (!pending)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 		/* We have to find out which channel generated the interrupt. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 		for (i = 0; i < atxdmac->dma.chancnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 			if (!((1 << i) & pending))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 			atchan = &atxdmac->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			atchan->irq_status = chan_status & chan_imr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 			dev_vdbg(atxdmac->dma.dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 				 "%s: chan%d: imr=0x%x, status=0x%x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 				 __func__, i, chan_imr, chan_status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			dev_vdbg(chan2dev(&atchan->chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 				 __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 			tasklet_schedule(&atchan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 			ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 	} while (pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) static void at_xdmac_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	spin_lock_irqsave(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	at_xdmac_advance_work(atchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) static int at_xdmac_device_config(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 				  struct dma_slave_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 	dev_dbg(chan2dev(chan), "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	spin_lock_irqsave(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 	ret = at_xdmac_set_slave_config(chan, config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) static int at_xdmac_device_pause(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 	dev_dbg(chan2dev(chan), "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	spin_lock_irqsave(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) 	spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) static int at_xdmac_device_resume(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	dev_dbg(chan2dev(chan), "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 	spin_lock_irqsave(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 	if (!at_xdmac_chan_is_paused(atchan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 		spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 	spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) }
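/*
 * Editor's addition: how a client drives the two callbacks above through the
 * generic dmaengine wrappers (guarded out, illustrative only; the function
 * name is hypothetical).
 */
#if 0
static void my_inspect_while_paused(struct dma_chan *chan)
{
	/* Routed to at_xdmac_device_pause(): sets GRWS, waits for WRIP/RDIP. */
	dmaengine_pause(chan);
	/* ... safely read buffers or query the residue here ... */
	/* Routed to at_xdmac_device_resume(): writes GRWR, clears PAUSED. */
	dmaengine_resume(chan);
}
#endif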
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) static int at_xdmac_device_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	struct at_xdmac_desc	*desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	unsigned long		flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) 	dev_dbg(chan2dev(chan), "%s\n", __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) 	spin_lock_irqsave(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) 	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) 	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 	/* Cancel all pending transfers. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 		at_xdmac_remove_xfer(atchan, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	spin_unlock_irqrestore(&atchan->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) }
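/*
 * Editor's addition: typical client teardown (guarded out, illustrative
 * only; the function name is hypothetical). dmaengine_terminate_sync()
 * invokes the terminate_all callback above and then waits for any running
 * callback, which matters here because completions run from the channel
 * tasklet.
 */
#if 0
static void my_stop_dma(struct dma_chan *chan)
{
	dmaengine_terminate_sync(chan);
	dma_release_channel(chan);
}
#endif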
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) 	struct at_xdmac_desc	*desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) 	if (at_xdmac_chan_is_enabled(atchan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 		dev_err(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 			"can't allocate channel resources (channel enabled)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	if (!list_empty(&atchan->free_descs_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 		dev_err(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 			"can't allocate channel resources (channel not free from a previous use)\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	for (i = 0; i < init_nr_desc_per_channel; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 		if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 			dev_warn(chan2dev(chan),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 				"only %d descriptors have been allocated\n", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	dma_cookie_init(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) static void at_xdmac_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 	struct at_xdmac_desc	*desc, *_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		list_del(&desc->desc_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) #ifdef CONFIG_PM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) static int atmel_xdmac_prepare(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	struct dma_chan		*chan, *_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 		/* Wait for transfer completion, except in the cyclic case. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 			return -EAGAIN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) #	define atmel_xdmac_prepare NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) static int atmel_xdmac_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	struct dma_chan		*chan, *_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		if (at_xdmac_chan_is_cyclic(atchan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 			if (!at_xdmac_chan_is_paused(atchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 				at_xdmac_device_pause(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 	at_xdmac_off(atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	clk_disable_unprepare(atxdmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) static int atmel_xdmac_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 	struct at_xdmac		*atxdmac = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 	struct at_xdmac_chan	*atchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	struct dma_chan		*chan, *_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	int			i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	ret = clk_prepare_enable(atxdmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	/* Clear pending interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 	for (i = 0; i < atxdmac->dma.chancnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 		atchan = &atxdmac->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 			cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		atchan = to_at_xdmac_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		if (at_xdmac_chan_is_cyclic(atchan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 			if (at_xdmac_chan_is_paused(atchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 				at_xdmac_device_resume(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) #endif /* CONFIG_PM_SLEEP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) static int at_xdmac_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	struct at_xdmac	*atxdmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 	int		irq, size, nr_channels, i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	void __iomem	*base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 	u32		reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 	if (irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		return irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	base = devm_platform_ioremap_resource(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	if (IS_ERR(base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 		return PTR_ERR(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	 * Read the number of XDMAC channels. The read helper can't be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 	 * here since atxdmac is not yet allocated, and we need the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	 * count to size that allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 	nr_channels = AT_XDMAC_NB_CH(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	if (nr_channels > AT_XDMAC_MAX_CHAN) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 			nr_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) 	}
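	/*
	 * Worked example (editor's addition, hypothetical value): if GTYPE
	 * reads back as 0x0063050f, the low five bits are 0x0f, so
	 * AT_XDMAC_NB_CH() yields 0x0f + 1 = 16 channels.
	 */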
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 	size = sizeof(*atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 	size += nr_channels * sizeof(struct at_xdmac_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 	atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 	if (!atxdmac) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	atxdmac->regs = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	atxdmac->irq = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	if (IS_ERR(atxdmac->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 		dev_err(&pdev->dev, "can't get dma_clk\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		return PTR_ERR(atxdmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 	/* Do not use devres for the IRQ, to prevent races with the tasklet. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		dev_err(&pdev->dev, "can't request irq\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	ret = clk_prepare_enable(atxdmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 		dev_err(&pdev->dev, "can't prepare or enable clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 		goto err_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	atxdmac->at_xdmac_desc_pool =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 				sizeof(struct at_xdmac_desc), 4, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	if (!atxdmac->at_xdmac_desc_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 		dev_err(&pdev->dev, "no memory for the descriptor DMA pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 		goto err_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 	 * Without DMA_PRIVATE the driver is not able to allocate more than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	 * one channel: the second allocation fails in private_candidate().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 	atxdmac->dma.dev				= &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 	atxdmac->dma.device_alloc_chan_resources	= at_xdmac_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	atxdmac->dma.device_free_chan_resources		= at_xdmac_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 	atxdmac->dma.device_tx_status			= at_xdmac_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 	atxdmac->dma.device_issue_pending		= at_xdmac_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	atxdmac->dma.device_prep_dma_cyclic		= at_xdmac_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 	atxdmac->dma.device_prep_interleaved_dma	= at_xdmac_prep_interleaved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 	atxdmac->dma.device_prep_dma_memcpy		= at_xdmac_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 	atxdmac->dma.device_prep_dma_memset		= at_xdmac_prep_dma_memset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 	atxdmac->dma.device_prep_dma_memset_sg		= at_xdmac_prep_dma_memset_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 	atxdmac->dma.device_prep_slave_sg		= at_xdmac_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 	atxdmac->dma.device_config			= at_xdmac_device_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 	atxdmac->dma.device_pause			= at_xdmac_device_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 	atxdmac->dma.device_resume			= at_xdmac_device_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 	atxdmac->dma.device_terminate_all		= at_xdmac_device_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	/* Disable all channels and interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 	at_xdmac_off(atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 	/* Init channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 	INIT_LIST_HEAD(&atxdmac->dma.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 	for (i = 0; i < nr_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		atchan->chan.device = &atxdmac->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 		list_add_tail(&atchan->chan.device_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 			      &atxdmac->dma.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		atchan->mask = 1 << i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		spin_lock_init(&atchan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		INIT_LIST_HEAD(&atchan->xfers_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 		INIT_LIST_HEAD(&atchan->free_descs_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		/* Clear pending interrupts. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 			cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 	platform_set_drvdata(pdev, atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	ret = dma_async_device_register(&atxdmac->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 		dev_err(&pdev->dev, "failed to register DMA engine device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 		goto err_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	ret = of_dma_controller_register(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 					 at_xdmac_xlate, atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 		dev_err(&pdev->dev, "could not register OF DMA controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 		goto err_dma_unregister;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 		 nr_channels, atxdmac->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) err_dma_unregister:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	dma_async_device_unregister(&atxdmac->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) err_clk_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 	clk_disable_unprepare(atxdmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) err_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	free_irq(atxdmac->irq, atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) }
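/*
 * Editor's addition: a hedged sketch (guarded out) of how a peripheral
 * driver obtains and configures a channel from this controller once probe
 * has registered it. The "tx" request-line name, function name, and fifo
 * address are hypothetical; dma_request_chan() resolves the channel through
 * the at_xdmac_xlate() translator registered above.
 */
#if 0
static struct dma_chan *my_get_tx_chan(struct device *dev, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst	= 1,
	};
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "tx");
	if (IS_ERR(chan))
		return chan;

	/* Routed to at_xdmac_device_config() under the channel lock. */
	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return ERR_PTR(-EINVAL);
	}
	return chan;
}
#endif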
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) static int at_xdmac_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 	struct at_xdmac	*atxdmac = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	int		i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	at_xdmac_off(atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	dma_async_device_unregister(&atxdmac->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	clk_disable_unprepare(atxdmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 	free_irq(atxdmac->irq, atxdmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	for (i = 0; i < atxdmac->dma.chancnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 		tasklet_kill(&atchan->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 		at_xdmac_free_chan_resources(&atchan->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	.prepare	= atmel_xdmac_prepare,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) static const struct of_device_id atmel_xdmac_dt_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 		.compatible = "atmel,sama5d4-dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	}, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		/* sentinel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) static struct platform_driver at_xdmac_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	.probe		= at_xdmac_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	.remove		= at_xdmac_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 		.name		= "at_xdmac",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) 		.pm		= &atmel_xdmac_dev_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) static int __init at_xdmac_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) subsys_initcall(at_xdmac_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) MODULE_LICENSE("GPL");