Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER			0x00	/* 64 bits */
#define SH_ECR			0x08	/* 64 bits */
#define SH_ESR			0x10	/* 64 bits */
#define SH_CER			0x18	/* 64 bits */
#define SH_EER			0x20	/* 64 bits */
#define SH_EECR			0x28	/* 64 bits */
#define SH_EESR			0x30	/* 64 bits */
#define SH_SER			0x38	/* 64 bits */
#define SH_SECR			0x40	/* 64 bits */
#define SH_IER			0x50	/* 64 bits */
#define SH_IECR			0x58	/* 64 bits */
#define SH_IESR			0x60	/* 64 bits */
#define SH_IPR			0x68	/* 64 bits */
#define SH_ICR			0x70	/* 64 bits */
#define SH_IEVAL		0x78
#define SH_QER			0x80
#define SH_QEER			0x84
#define SH_QEECR		0x88
#define SH_QEESR		0x8c
#define SH_QSER			0x90
#define SH_QSECR		0x94
#define SH_SIZE			0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV		0x0000
#define EDMA_CCCFG		0x0004
#define EDMA_QCHMAP		0x0200	/* 8 registers */
#define EDMA_DMAQNUM		0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM		0x0260
#define EDMA_QUETCMAP		0x0280
#define EDMA_QUEPRI		0x0284
#define EDMA_EMR		0x0300	/* 64 bits */
#define EDMA_EMCR		0x0308	/* 64 bits */
#define EDMA_QEMR		0x0310
#define EDMA_QEMCR		0x0314
#define EDMA_CCERR		0x0318
#define EDMA_CCERRCLR		0x031c
#define EDMA_EEVAL		0x0320
#define EDMA_DRAE		0x0340	/* 4 x 64 bits */
#define EDMA_QRAE		0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT		0x0600	/* 2 registers */
#define EDMA_QWMTHRA		0x0620
#define EDMA_QWMTHRB		0x0624
#define EDMA_CCSTAT		0x0640

#define EDMA_M			0x1000	/* global channel registers */
#define EDMA_ECR		0x1008
#define EDMA_ECRH		0x100C
#define EDMA_SHADOW0		0x2000	/* 4 shadow regions */
#define EDMA_PARM		0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
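/*
 * Editorial note: each PaRAM set is PARM_SIZE (0x20) bytes, so e.g.
 * PARM_OFFSET(0) == 0x4000 and PARM_OFFSET(1) == 0x4020.
 */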

#define EDMA_DCHMAP		0x0100  /* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)
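/*
 * Editorial note: these CCCFG fields are small binary codes rather than
 * raw counts; the probe path (not shown in this excerpt) is assumed to
 * expand them, e.g. a NUM_DMACH code of n decoding to BIT(n + 1) channels.
 */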

/* CCSTAT register */
#define EDMA_CCSTAT_ACTV	BIT(4)

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003

/*
 * 64bit array registers are split into two 32bit registers:
 * reg0: channel/event 0-31
 * reg1: channel/event 32-63
 *
 * bit 5 of the channel number selects the array index (0/1)
 * bits 0-4 (0x1f) give the bit offset within the register
 */
#define EDMA_REG_ARRAY_INDEX(channel)	((channel) >> 5)
#define EDMA_CHANNEL_BIT(channel)	(BIT((channel) & 0x1f))
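/*
 * Editorial note: for channel 35, EDMA_REG_ARRAY_INDEX(35) == 1 and
 * EDMA_CHANNEL_BIT(35) == BIT(3), i.e. bit 3 of the "high" register.
 */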

/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)

struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	bool				polled;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The number of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[];
};

struct edma_cc;

struct edma_tc {
	struct device_node		*node;
	u16				id;
};

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	struct edma_tc			*tc;
	int				ch_num;
	bool				alloced;
	bool				hw_triggered;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;
	bool				legacy_mode;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_qchannels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	unsigned int			ccint;
	unsigned int			ccerrint;

	/*
	 * The slot_inuse bit for each PaRAM slot is clear unless the slot is
	 * in use by Linux or has been allocated for use by the DSP.
	 */
	unsigned long *slot_inuse;

	/*
	 * For tracking reserved channels used by the DSP.
	 * If the bit is cleared, the channel is allocated for use by the DSP
	 * and Linux must not touch it.
	 */
	unsigned long *channels_mask;

	struct dma_device		dma_slave;
	struct dma_device		*dma_memcpy;
	struct edma_chan		*slave_chans;
	struct edma_tc			*tc_list;
	int				dummy_slot;
};

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

#define EDMA_BINDING_LEGACY	0
#define EDMA_BINDING_TPCC	1
static const u32 edma_binding_type[] = {
	[EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
	[EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
};

static const struct of_device_id edma_of_ids[] = {
	{
		.compatible = "ti,edma3",
		.data = &edma_binding_type[EDMA_BINDING_LEGACY],
	},
	{
		.compatible = "ti,edma3-tpcc",
		.data = &edma_binding_type[EDMA_BINDING_TPCC],
	},
	{}
};
MODULE_DEVICE_TABLE(of, edma_of_ids);

static const struct of_device_id edma_tptc_of_ids[] = {
	{ .compatible = "ti,edma3-tptc", },
	{}
};
MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);

static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}
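/*
 * Editorial note: edma_modify() is a plain read-modify-write, e.g.
 * edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << 4), 3 << 4) would set event
 * queue 1 to priority 3 (see edma_assign_priority_to_queue() below).
 */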

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
					   int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
				  unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
				 unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}

static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}

static void edma_set_chmap(struct edma_chan *echan, int slot)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}
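/*
 * Editorial note: the slot << 5 above places the PaRAM entry number in
 * the DCHMAP PAENTRY field, which per TI's EDMA3 documentation occupies
 * bits 13:5 of the register.
 */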

static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);
		edma_shadow0_write_array(ecc, SH_IESR, idx, ch_bit);
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, idx, ch_bit);
	}
}
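/*
 * Editorial note: on enable, any stale status is acknowledged via ICR
 * before IESR unmasks the interrupt, so a leftover completion bit cannot
 * fire the moment the channel is set up.
 */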

/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return -EINVAL;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);

	return 0;
}

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot >= 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}
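/*
 * Editorial note, a minimal usage sketch: callers typically request
 * scratch slots with edma_alloc_slot(ecc, EDMA_SLOT_ANY), stash the
 * returned number (e.g. in echan->slot[i]), and release it later with
 * edma_free_slot().
 */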

static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}

/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			  PARM_OFFSET(to));
}
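/*
 * Editorial note: the low 16 bits of LINK_BCNTRLD hold the PaRAM address
 * of the next set to auto-load; 0xffff (as in dummy_paramset) is the
 * null link that terminates the chain.
 */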

/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst:  true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}

/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	if (!echan->hw_triggered) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_ESR, idx));
		edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);
	} else {
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_ER, idx));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, idx, ch_bit);
		edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
		edma_shadow0_write_array(ecc, SH_EESR, idx, ch_bit);
		dev_dbg(ecc->dev, "EER%d %08x\n", idx,
			edma_shadow0_read_array(ecc, SH_EER, idx));
	}
}

static void edma_stop(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	edma_shadow0_write_array(ecc, SH_EECR, idx, ch_bit);
	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, idx, ch_bit);

	dev_dbg(ecc->dev, "EER%d %08x\n", idx,
		edma_shadow0_read_array(ecc, SH_EER, idx));

	/* REVISIT:  consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
}

/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	edma_shadow0_write_array(echan->ecc, SH_EECR,
				 EDMA_REG_ARRAY_INDEX(channel),
				 EDMA_CHANNEL_BIT(channel));
}

/* Re-enable EDMA hardware events on the specified channel.  */
static void edma_resume(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	edma_shadow0_write_array(echan->ecc, SH_EESR,
				 EDMA_REG_ARRAY_INDEX(channel),
				 EDMA_CHANNEL_BIT(channel));
}

static void edma_trigger_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	edma_shadow0_write_array(ecc, SH_ESR, idx, ch_bit);

	dev_dbg(ecc->dev, "ESR%d %08x\n", idx,
		edma_shadow0_read_array(ecc, SH_ESR, idx));
}

static void edma_clean_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int idx = EDMA_REG_ARRAY_INDEX(channel);
	int ch_bit = EDMA_CHANNEL_BIT(channel);

	dev_dbg(ecc->dev, "EMR%d %08x\n", idx,
		edma_read_array(ecc, EDMA_EMR, idx));
	edma_shadow0_write_array(ecc, SH_ECR, idx, ch_bit);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, idx, ch_bit);
	/* Clear any SER */
	edma_shadow0_write_array(ecc, SH_SECR, idx, ch_bit);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}
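/*
 * Editorial note: the CCERRCLR write above is believed to clear TCCERR
 * (bit 16) and the QTHRXCD1/QTHRXCD0 queue-threshold error flags
 * (bits 1 and 0); see the EDMA3 CC register description.
 */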

/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	eventq_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  eventq_no << bit);
}
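/*
 * Editorial note: DMAQNUM packs eight 4-bit-spaced queue fields per
 * register, so e.g. channel 11 lands in DMAQNUM1 (11 >> 3 == 1) at bit
 * offset 12 ((11 & 0x7) * 4).
 */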
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) static int edma_alloc_channel(struct edma_chan *echan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 			      enum dma_event_q eventq_no)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	struct edma_cc *ecc = echan->ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	if (!test_bit(echan->ch_num, ecc->channels_mask)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		dev_err(ecc->dev, "Channel%d is reserved, can not be used!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 			echan->ch_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 	/* ensure access through shadow region 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 	edma_or_array2(ecc, EDMA_DRAE, 0, EDMA_REG_ARRAY_INDEX(channel),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 		       EDMA_CHANNEL_BIT(channel));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	/* ensure no events are pending */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	edma_stop(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	edma_setup_interrupt(echan, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	edma_assign_channel_eventq(echan, eventq_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) }

static void edma_free_channel(struct edma_chan *echan)
{
	/* ensure no events are pending */
	edma_stop(echan);
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);
}

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many PaRAM sets are left to process */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 "  chnum\t%d\n"
			 "  slot\t%d\n"
			 "  opt\t%08x\n"
			 "  src\t%08x\n"
			 "  dst\t%08x\n"
			 "  abcnt\t%08x\n"
			 "  ccnt\t%08x\n"
			 "  bidx\t%08x\n"
			 "  cidx\t%08x\n"
			 "  lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the next slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions, set up
	 * a link to the dummy slot; this results in all future events
	 * being absorbed, which is OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up
		 * into transfers of at most MAX_NR_SG elements.
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan);
	}
}
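
/*
 * Illustrative note (assumption, not from the original source): with
 * MAX_NR_SG == 16, a 25-element SG list is dispatched in two rounds:
 * edma_execute() first programs slots for psets 0..15 and starts the
 * channel; once those complete, the completion handler calls it again,
 * it programs psets 16..24, links the last slot to the dummy slot, and
 * resumes the channel.
 */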

static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after this function returns (even if it is, it will see that
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan);
		/* Move the cyclic channel back to default queue */
		if (!echan->tc && echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);

		vchan_terminate_vdesc(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

static void edma_synchronize(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	vchan_synchronize(&echan->vchan);
}
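
/*
 * Illustrative client-side sketch (assumption, not from the original
 * source): a dmaengine user would typically reach the two callbacks
 * above via
 *
 *	dmaengine_terminate_async(chan);	// -> edma_terminate_all()
 *	dmaengine_synchronize(chan);		// -> edma_synchronize()
 *
 * to stop the channel and then wait until no completion callbacks can
 * still be running before freeing its buffers.
 */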

static int edma_slave_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	if (cfg->src_maxburst > chan->device->max_burst ||
	    cfg->dst_maxburst > chan->device->max_burst)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}
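
/*
 * Illustrative client-side sketch (assumption, not from the original
 * source): the checks above reject 8-byte bus widths and over-large
 * bursts; a typical valid configuration from a peripheral driver would
 * look like
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys_addr,	// hypothetical FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 */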

static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan);
	return 0;
}
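
/*
 * Illustrative note (assumption, not from the original source): these
 * back the generic dmaengine_pause()/dmaengine_resume() calls; pausing
 * is only meaningful while a descriptor is in flight, hence the -EINVAL
 * when echan->edesc is NULL.
 */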

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @epset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of acnt, how much to send
 * @acnt: Width of each element (for slave transfers, the device bus width)
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: bcntrld is used in A-sync transfers only, and it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In that case the approach is: bcnt for the first frame
		 * is the remainder computed below; every successive frame
		 * then uses a bcnt of SZ_64K - 1, which is assured by
		 * setting bcntrld to 0xffff at the end of this function.
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}
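
	/*
	 * Illustrative worked example (assumption, not from the original
	 * source): for an A-sync transfer with acnt == 1 and
	 * dma_length == 1048576, the split above gives
	 * ccnt = 1048576 / 65535 = 16 and bcnt = 1048576 - 16 * 65535 = 16;
	 * bcnt is non-zero, so ccnt becomes 17: one short first frame of
	 * 16 bytes, then 16 frames of 65535 bytes via the bcnt reload.
	 * For an AB-sync transfer with acnt == 4 and burst == 8, each
	 * frame moves 32 bytes and ccnt = dma_length / 32.
	 */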

	epset->len = dma_length;

	if (direction == DMA_MEM_TO_DEV) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = 0;
		dst_cidx = 0;
		epset->addr = src_addr;
	} else if (direction == DMA_DEV_TO_MEM) {
		src_bidx = 0;
		src_cidx = 0;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = dst_addr;
	} else if (direction == DMA_MEM_TO_MEM) {
		src_bidx = acnt;
		src_cidx = cidx;
		dst_bidx = acnt;
		dst_cidx = cidx;
		epset->addr = src_addr;
	} else {
		dev_err(dev, "%s: direction not implemented yet\n", __func__);
		return -EINVAL;
	}

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	/* Configure A or AB synchronized transfers */
	if (absync)
		param->opt |= SYNCDIM;

	param->src = src_addr;
	param->dst = dst_addr;

	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = (dst_cidx << 16) | src_cidx;

	param->a_b_cnt = bcnt << 16 | acnt;
	param->ccnt = ccnt;
	/*
	 * bcnt auto-reload (bcntrld) is only required in the A-sync case,
	 * and there the only reload value ever needed is SZ_64K - 1. The
	 * link half of this field is initialized to "no link" here and is
	 * populated later by edma_execute().
	 */
	param->link_bcntrld = 0xffffffff;
	return absync;
}

static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(struct_size(edesc, pset, sg_len), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		if (i == sg_len - 1)
			/* Enable completion interrupt */
			edesc->pset[i].param.opt |= TCINTEN;
		else if (!((i + 1) % MAX_NR_SG))
			/*
			 * Enable early completion interrupt for the
			 * intermediate set. In this case the driver will be
			 * notified when the PaRAM set is submitted to the TC.
			 * This will allow more time to set up the next set
			 * of slots.
			 */
			edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
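
/*
 * Illustrative client-side sketch (assumption, not from the original
 * source): after dmaengine_slave_config(), a peripheral driver would
 * obtain a descriptor from this callback via
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = my_done_cb;		// hypothetical callback
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */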

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret, nslots;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len, array_size;

	if (unlikely(!echan || !len))
		return NULL;

	/* Align the array size (acnt block) with the transfer properties */
	switch (__ffs((src | dest | len))) {
	case 0:
		array_size = SZ_32K - 1;
		break;
	case 1:
		array_size = SZ_32K - 2;
		break;
	default:
		array_size = SZ_32K - 4;
		break;
	}
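
	/*
	 * Illustrative note (assumption, not from the original source):
	 * __ffs() of (src | dest | len) is the coarsest common alignment.
	 * If any of the three is odd (case 0), the largest usable ACNT is
	 * 32767; for 2-byte alignment 32766 is used; for 4-byte or better
	 * alignment 32764, so each array stays a whole number of naturally
	 * aligned elements.
	 */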

	if (len < SZ_64K) {
		/*
		 * Transfer size less than 64K can be handled with one PaRAM
		 * slot and with one burst.
		 * ACNT = length
		 */
		width = len;
		pset_len = len;
		nslots = 1;
	} else {
		/*
		 * Transfer size bigger than 64K will be handled with at most
		 * two PaRAM slots.
		 * slot1: (full_length / 32767) times 32767-byte bursts.
		 *	  ACNT = 32767, length1: (full_length / 32767) * 32767
		 * slot2: the remaining amount of data after slot1.
		 *	  ACNT = full_length - length1, length2 = ACNT
		 *
		 * When full_length is a multiple of 32767 one slot can be
		 * used to complete the transfer.
		 */
		width = array_size;
		pset_len = rounddown(len, width);
		/* One slot is enough for lengths that are a multiple of width */
		if (unlikely(pset_len == len))
			nslots = 1;
		else
			nslots = 2;
	}
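
	/*
	 * Illustrative worked example (assumption, not from the original
	 * source): with 4-byte alignment array_size == 32764, so a
	 * len == 100000 copy gives pset_len = rounddown(100000, 32764)
	 * = 98292 for the first slot (three 32764-byte arrays) and a
	 * second slot for the remaining 100000 % 32764 == 1708 bytes.
	 */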

	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
	if (nslots == 1) {
		/* Enable transfer complete interrupt if requested */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[0].param.opt |= TCINTEN;
	} else {
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
		dest += pset_len;
		src += pset_len;
		pset_len = width = len % array_size;

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->pset[1].param.opt |= ITCCHEN;
		/* Enable transfer complete interrupt if requested */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[1].param.opt |= TCINTEN;
	}

	if (!(tx_flags & DMA_PREP_INTERRUPT))
		edesc->polled = true;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
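
/*
 * Illustrative note (assumption, not from the original source): when a
 * client omits DMA_PREP_INTERRUPT, e.g.
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *
 * no completion interrupt fires and the descriptor is marked as polled,
 * so completion is detected when the client polls dmaengine_tx_status().
 */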

static struct dma_async_tx_descriptor *
edma_prep_dma_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long tx_flags)
{
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	struct edmacc_param *param;
	struct edma_desc *edesc;
	size_t src_icg, dst_icg;
	int src_bidx, dst_bidx;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	if (xt->sgl[0].size > SZ_64K || xt->numf > SZ_64K)
		return NULL;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		src_bidx = src_icg + xt->sgl[0].size;
	} else if (xt->src_inc) {
		src_bidx = xt->sgl[0].size;
	} else {
		dev_err(dev, "%s: SRC constant addressing is not supported\n",
			__func__);
		return NULL;
	}

	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (dst_icg) {
		dst_bidx = dst_icg + xt->sgl[0].size;
	} else if (xt->dst_inc) {
		dst_bidx = xt->sgl[0].size;
	} else {
		dev_err(dev, "%s: DST constant addressing is not supported\n",
			__func__);
		return NULL;
	}

	if (src_bidx > SZ_64K || dst_bidx > SZ_64K)
		return NULL;
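
	/*
	 * Illustrative worked example (assumption, not from the original
	 * source): for 16-byte chunks separated by a 48-byte inter-chunk
	 * gap on the source side, src_bidx = 48 + 16 = 64, so each B-count
	 * step advances the source pointer by a full 64-byte stride while
	 * only 16 bytes (ACNT) are actually copied per frame.
	 */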

	edesc = kzalloc(struct_size(edesc, pset, 1), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;
	edesc->pset_nr = 1;

	param = &edesc->pset[0].param;

	param->src = xt->src_start;
	param->dst = xt->dst_start;
	param->a_b_cnt = xt->numf << 16 | xt->sgl[0].size;
	param->ccnt = 1;
	param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
	param->src_dst_cidx = 0;

	param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
	param->opt |= ITCCHEN;
	/* Enable transfer complete interrupt if requested */
	if (tx_flags & DMA_PREP_INTERRUPT)
		param->opt |= TCINTEN;
	else
		edesc->polled = true;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	bool use_intermediate = false;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG) {
		/*
		 * If the burst and period sizes are the same, we can put
		 * the full buffer into a single period and activate
		 * intermediate interrupts. This will produce interrupts
		 * after each burst, which is also after each desired period.
		 */
		if (burst == period_len) {
			period_len = buf_len;
			nslots = 2;
			use_intermediate = true;
		} else {
			return NULL;
		}
	}
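
	/*
	 * Illustrative worked example (assumption, not from the original
	 * source): an audio ring of buf_len == 48000 bytes with
	 * period_len == 4800 needs nslots = 48000 / 4800 + 1 = 11; the
	 * extra slot holds a copy of the first period's PaRAM set so the
	 * ring can link back on itself and run without CPU intervention.
	 */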

	edesc = kzalloc(struct_size(edesc, pset, nslots), GFP_ATOMIC);
	if (!edesc)
		return NULL;

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			"\n pset[%d]:\n"
			"  chnum\t%d\n"
			"  slot\t%d\n"
			"  opt\t%08x\n"
			"  src\t%08x\n"
			"  dst\t%08x\n"
			"  abcnt\t%08x\n"
			"  ccnt\t%08x\n"
			"  bidx\t%08x\n"
			"  cidx\t%08x\n"
			"  lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].param.opt,
			edesc->pset[i].param.src,
			edesc->pset[i].param.dst,
			edesc->pset[i].param.a_b_cnt,
			edesc->pset[i].param.ccnt,
			edesc->pset[i].param.src_dst_bidx,
			edesc->pset[i].param.src_dst_cidx,
			edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/* Enable period interrupt only if it is requested */
		if (tx_flags & DMA_PREP_INTERRUPT) {
			edesc->pset[i].param.opt |= TCINTEN;

			/* Also enable intermediate interrupts if necessary */
			if (use_intermediate)
				edesc->pset[i].param.opt |= ITCINTEN;
		}
	}

	/* Place the cyclic channel on the highest priority queue */
	if (!echan->tc)
		edma_assign_channel_eventq(echan, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}
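
/*
 * Illustrative client-side sketch (assumption, not from the original
 * source): an ALSA-style user would set up the ring with
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, buf_len,
 *					 period_len, DMA_MEM_TO_DEV,
 *					 DMA_PREP_INTERRUPT);
 *
 * and then receive the vchan cyclic callback once per period until the
 * channel is terminated.
 */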

static void edma_completion_handler(struct edma_chan *echan)
{
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc;

	spin_lock(&echan->vchan.lock);
	edesc = echan->edesc;
	if (edesc) {
		if (edesc->cyclic) {
			vchan_cyclic_callback(&edesc->vdesc);
			spin_unlock(&echan->vchan.lock);
			return;
		} else if (edesc->processed == edesc->pset_nr) {
			edesc->residue = 0;
			edma_stop(echan);
			vchan_cookie_complete(&edesc->vdesc);
			echan->edesc = NULL;

			dev_dbg(dev, "Transfer completed on channel %d\n",
				echan->ch_num);
		} else {
			dev_dbg(dev, "Sub transfer completed on channel %d\n",
				echan->ch_num);

			edma_pause(echan);

			/* Update statistics for tx_status */
			edesc->residue -= edesc->sg_len;
			edesc->residue_stat = edesc->residue;
			edesc->processed_stat = edesc->processed;
		}
		edma_execute(echan);
	}

	spin_unlock(&echan->vchan.lock);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) /* eDMA interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) static irqreturn_t dma_irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) 	struct edma_cc *ecc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) 	int ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 	u32 sh_ier;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 	u32 sh_ipr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 	u32 bank;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	ctlr = ecc->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 	if (ctlr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 	dev_vdbg(ecc->dev, "dma_irq_handler\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 	if (!sh_ipr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		if (!sh_ipr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 			return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) 		bank = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) 		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) 		bank = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) 		u32 slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) 		u32 channel;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) 		slot = __ffs(sh_ipr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) 		sh_ipr &= ~(BIT(slot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) 		if (sh_ier & BIT(slot)) {
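			/* Bank 0 covers channels 0-31, bank 1 channels 32-63 */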
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) 			channel = (bank << 5) | slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) 			/* Clear the corresponding IPR bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) 			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) 			edma_completion_handler(&ecc->slave_chans[channel]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) 	} while (sh_ipr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 
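	/* Re-evaluate IPR so that any still-pending interrupt re-fires */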
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 	edma_shadow0_write(ecc, SH_IEVAL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) static void edma_error_handler(struct edma_chan *echan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 	struct edma_cc *ecc = echan->ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 	struct device *dev = echan->vchan.chan.device->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	struct edmacc_param p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 	int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 	if (!echan->edesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) 	spin_lock(&echan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) 	err = edma_read_slot(ecc, echan->slot[0], &p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) 	 * Defer the issue by setting the missed flag; it is guaranteed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) 	 * to be handled because either:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) 	 * (1) we finished transmitting an intermediate slot and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) 	 *     edma_execute is coming up, or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612) 	 * (2) we finished the current transfer and issue_pending will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) 	 *     call edma_execute.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) 	 * Important note: issuing here can be dangerous and lead to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	 * nasty recursion when we are in a NULL slot, so we avoid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 	 * doing that and set the missed flag instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		dev_dbg(dev, "Error on null slot, setting miss\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 		echan->missed = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 		 * The slot is already programmed but the event got
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 		 * missed, so it's safe to issue it here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		dev_dbg(dev, "Missed event, TRIGGERING\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 		edma_clean_channel(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) 		edma_stop(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 		edma_start(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) 		edma_trigger_channel(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) 	spin_unlock(&echan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) 
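/* True if any event-missed (EMR/QEMR) or controller error (CCERR) bit is set */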
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) static inline bool edma_error_pending(struct edma_cc *ecc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) 	if (edma_read_array(ecc, EDMA_EMR, 0) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 	    edma_read_array(ecc, EDMA_EMR, 1) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) 	    edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) /* eDMA error interrupt handler */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) static irqreturn_t dma_ccerr_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) 	struct edma_cc *ecc = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) 	int ctlr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) 	unsigned int cnt = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	unsigned int val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	ctlr = ecc->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) 	if (ctlr < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) 	dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	if (!edma_error_pending(ecc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 		 * The registers indicate no pending error event but the irq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 		 * handler has been called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 		 * Ask eDMA to re-evaluate the error registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 		dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 			__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 		edma_write(ecc, EDMA_EEVAL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 
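	/*
	 * Clear every latched error source, then re-check: new errors can
	 * be raised while we are clearing, so loop, but give up after a
	 * bounded number of passes to avoid livelocking in the handler.
	 */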
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 	while (1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		/* Event missed register(s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 		for (j = 0; j < 2; j++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 			unsigned long emr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) 			val = edma_read_array(ecc, EDMA_EMR, j);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 			if (!val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 			dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 			emr = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 			for (i = find_next_bit(&emr, 32, 0); i < 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 			     i = find_next_bit(&emr, 32, i + 1)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 				int k = (j << 5) + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 				/* Clear the corresponding EMR bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 				edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 				/* Clear any SER */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 				edma_shadow0_write_array(ecc, SH_SECR, j,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 							 BIT(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 				edma_error_handler(&ecc->slave_chans[k]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 		val = edma_read(ecc, EDMA_QEMR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) 		if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 			dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) 			/* Not reported, just clear the interrupt reason. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) 			edma_write(ecc, EDMA_QEMCR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 			edma_shadow0_write(ecc, SH_QSECR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 		val = edma_read(ecc, EDMA_CCERR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		if (val) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 			dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 			/* Not reported, just clear the interrupt reason. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 			edma_write(ecc, EDMA_CCERRCLR, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 		if (!edma_error_pending(ecc))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 		cnt++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		if (cnt > 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	edma_write(ecc, EDMA_EEVAL, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) /* Alloc channel resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) static int edma_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 	struct edma_chan *echan = to_edma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) 	struct edma_cc *ecc = echan->ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 	struct device *dev = ecc->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) 	enum dma_event_q eventq_no = EVENTQ_DEFAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 	if (echan->tc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 		eventq_no = echan->tc->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 	} else if (ecc->tc_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 		/* memcpy channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 		echan->tc = &ecc->tc_list[ecc->info->default_queue];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 		eventq_no = echan->tc->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 	ret = edma_alloc_channel(echan, eventq_no);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 	echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) 	if (echan->slot[0] < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) 		dev_err(dev, "Entry slot allocation failed for channel %u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 			EDMA_CHAN_SLOT(echan->ch_num));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 		ret = echan->slot[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 		goto err_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 	/* Set up channel -> slot mapping for the entry slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 	edma_set_chmap(echan, echan->slot[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 	echan->alloced = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 	dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 		echan->hw_triggered ? "HW" : "SW");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) err_slot:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) 	edma_free_channel(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) /* Free channel resources */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) static void edma_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	struct edma_chan *echan = to_edma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) 	struct device *dev = echan->ecc->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) 	/* Terminate transfers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	edma_stop(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	vchan_free_chan_resources(&echan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	/* Free EDMA PaRAM slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) 	for (i = 0; i < EDMA_MAX_SLOTS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 		if (echan->slot[i] >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) 			edma_free_slot(echan->ecc, echan->slot[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			echan->slot[i] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 	/* Set entry slot to the dummy slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	edma_set_chmap(echan, echan->ecc->dummy_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 	/* Free EDMA channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	if (echan->alloced) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 		edma_free_channel(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 		echan->alloced = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 	echan->tc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) 	echan->hw_triggered = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) 	dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) 		EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) /* Send pending descriptor to hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) static void edma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) 	struct edma_chan *echan = to_edma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	spin_lock_irqsave(&echan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 		edma_execute(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)  * This limit exists to avoid a possible infinite loop when waiting for proof
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816)  * that a particular transfer is completed. This limit can be hit if there
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817)  * are large bursts to/from slow devices or the CPU is never able to catch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)  * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)  * RX-FIFO, as many as 55 loops have been seen.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) #define EDMA_MAX_TR_WAIT_LOOPS 1000
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) 
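/*
 * Number of bytes still to be transferred for @edesc, derived from the
 * current source/destination position in the channel's active PaRAM slot.
 */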
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) static u32 edma_residue(struct edma_desc *edesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) 	bool dst = edesc->direction == DMA_DEV_TO_MEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) 	int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) 	struct edma_chan *echan = edesc->echan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) 	struct edma_pset *pset = edesc->pset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) 	dma_addr_t done, pos, pos_old;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	int channel = EDMA_CHAN_SLOT(echan->ch_num);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) 	int idx = EDMA_REG_ARRAY_INDEX(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 	int ch_bit = EDMA_CHANNEL_BIT(channel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) 	int event_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 	 * We always read the dst/src position from the first PaRAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	 * pset. That's the one which is active now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 	pos = edma_get_position(echan->ecc, echan->slot[0], dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 	 * "pos" may represent a transfer request that is still being
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 	 * processed by the EDMACC or EDMATC. We will busy wait until
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 	 * any one of the following occurs:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 	 *   1. no event is pending for the channel anymore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 	 *   2. the position has been updated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 	 *   3. we hit the loop limit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 	if (is_slave_direction(edesc->direction))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 		event_reg = SH_ER;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 		event_reg = SH_ESR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	pos_old = pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) 	while (edma_shadow0_read_array(echan->ecc, event_reg, idx) & ch_bit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 		pos = edma_get_position(echan->ecc, echan->slot[0], dst);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) 		if (pos != pos_old)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) 		if (!--loop_count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 			dev_dbg_ratelimited(echan->vchan.chan.device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) 				"%s: timeout waiting for PaRAM update\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) 				__func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) 		cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	 * Cyclic is simple. Just subtract pset[0].addr from pos.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 	 * We never update edesc->residue in the cyclic case, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	 * residue - done yields the remaining room to the end of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 	 * circular buffer.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 	if (edesc->cyclic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 		done = pos - pset->addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		edesc->residue_stat = edesc->residue - done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		return edesc->residue_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 	 * If the position is 0, then the eDMA loaded the closing dummy slot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 	 * and the transfer is completed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) 	if (!pos)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	 * For SG operation we catch up with the last processed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) 	 * status.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) 	pset += edesc->processed_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		 * If we are inside this pset address range, we know
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		 * this is the active one. Get the current delta and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 		 * stop walking the psets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 		if (pos >= pset->addr && pos < pset->addr + pset->len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) 			return edesc->residue_stat - (pos - pset->addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) 		/* Otherwise mark it done and update residue_stat. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) 		edesc->processed_stat++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 		edesc->residue_stat -= pset->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) 	return edesc->residue_stat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) /* Check request completion status */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) static enum dma_status edma_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) 				      dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) 				      struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) 	struct edma_chan *echan = to_edma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 	struct dma_tx_state txstate_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) 	enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 	ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) 	if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) 	/* Provide a dummy dma_tx_state for completion checking */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) 	if (!txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 		txstate = &txstate_tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	spin_lock_irqsave(&echan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 	if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 		txstate->residue = edma_residue(echan->edesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 		struct virt_dma_desc *vdesc = vchan_find_desc(&echan->vchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 							      cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 		if (vdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 			txstate->residue = to_edma_desc(&vdesc->tx)->residue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) 			txstate->residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) 	 * Mark the cookie completed if the residue is 0 for polled,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	 * non-cyclic transfers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 	if (ret != DMA_COMPLETE && !txstate->residue &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	    echan->edesc && echan->edesc->polled &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 	    echan->edesc->vdesc.tx.cookie == cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 		edma_stop(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 		vchan_cookie_complete(&echan->edesc->vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 		echan->edesc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 		edma_execute(echan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 		ret = DMA_COMPLETE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 	spin_unlock_irqrestore(&echan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) 
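/* @memcpy_channels is a -1 terminated list of channels reserved for memcpy */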
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) 	if (!memcpy_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) 	while (*memcpy_channels != -1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 		if (*memcpy_channels == ch_num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 			return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 		memcpy_channels++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
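/* Slave transfers support 1, 2, 3 and 4 byte bus widths */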
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) #define EDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) 	struct dma_device *s_ddev = &ecc->dma_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 	struct dma_device *m_ddev = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	s32 *memcpy_channels = ecc->info->memcpy_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 	int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) 	dma_cap_zero(s_ddev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 	dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) 	dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) 	if (ecc->legacy_mode && !memcpy_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 		dev_warn(ecc->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 			 "Legacy memcpy is enabled, things might not work\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 		dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 		dma_cap_set(DMA_INTERLEAVE, s_ddev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 		s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 		s_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		s_ddev->directions = BIT(DMA_MEM_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	s_ddev->device_free_chan_resources = edma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	s_ddev->device_issue_pending = edma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 	s_ddev->device_tx_status = edma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 	s_ddev->device_config = edma_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 	s_ddev->device_pause = edma_dma_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	s_ddev->device_resume = edma_dma_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) 	s_ddev->device_terminate_all = edma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 	s_ddev->device_synchronize = edma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 	s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) 	s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) 	s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 	s_ddev->max_burst = SZ_32K - 1; /* CIDX: 16bit signed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 	s_ddev->dev = ecc->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) 	INIT_LIST_HEAD(&s_ddev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) 	if (memcpy_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) 		m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 		if (!m_ddev) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 			dev_warn(ecc->dev, "memcpy is disabled due to OoM\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 			memcpy_channels = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 			goto ch_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 		ecc->dma_memcpy = m_ddev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 		dma_cap_zero(m_ddev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 		dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) 		dma_cap_set(DMA_INTERLEAVE, m_ddev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) 		m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) 		m_ddev->device_prep_interleaved_dma = edma_prep_dma_interleaved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) 		m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) 		m_ddev->device_free_chan_resources = edma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) 		m_ddev->device_issue_pending = edma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) 		m_ddev->device_tx_status = edma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) 		m_ddev->device_config = edma_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 		m_ddev->device_pause = edma_dma_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 		m_ddev->device_resume = edma_dma_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 		m_ddev->device_terminate_all = edma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 		m_ddev->device_synchronize = edma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 		m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 		m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 		m_ddev->directions = BIT(DMA_MEM_TO_MEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) 		m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) 		m_ddev->dev = ecc->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 		INIT_LIST_HEAD(&m_ddev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) 	} else if (!ecc->legacy_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) 		dev_info(ecc->dev, "memcpy is disabled\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) ch_setup:
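	/* Bind every channel to either the memcpy or the slave dma_device */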
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	for (i = 0; i < ecc->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 		struct edma_chan *echan = &ecc->slave_chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 		echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		echan->ecc = ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		echan->vchan.desc_free = edma_desc_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 			vchan_init(&echan->vchan, m_ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 			vchan_init(&echan->vchan, s_ddev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) 		INIT_LIST_HEAD(&echan->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 		for (j = 0; j < EDMA_MAX_SLOTS; j++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) 			echan->slot[j] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
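/*
 * Decode channel/slot/TC counts from the CCCFG register and, if the
 * platform data does not already provide one, build a default
 * queue -> priority mapping.
 */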
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 			      struct edma_cc *ecc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) 	u32 value, cccfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	s8 (*queue_priority_map)[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) 	/* Decode the eDMA3 configuration from CCCFG register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 	cccfg = edma_read(ecc, EDMA_CCCFG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) 	value = GET_NUM_REGN(cccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) 	ecc->num_region = BIT(value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) 	value = GET_NUM_DMACH(cccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) 	ecc->num_channels = BIT(value + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) 	value = GET_NUM_QDMACH(cccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) 	ecc->num_qchannels = value * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) 	value = GET_NUM_PAENTRY(cccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) 	ecc->num_slots = BIT(value + 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) 	value = GET_NUM_EVQUE(cccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) 	ecc->num_tc = value + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) 	ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) 	dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) 	dev_dbg(dev, "num_region: %u\n", ecc->num_region);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) 	dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) 	dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107) 	dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) 	dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) 	dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) 	/* Nothing needs to be done if a queue priority mapping is provided */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) 	if (pdata->queue_priority_mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) 	 * Configure TC/queue priority as follows:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) 	 * Q0 - priority 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) 	 * Q1 - priority 1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) 	 * Q2 - priority 2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) 	 * ...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) 	 * The meaning of priority numbers: 0 is the highest priority, 7 the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) 	 * lowest. So Q0 is the highest-priority queue and the last queue has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) 	 * the lowest priority.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) 	 */
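	/* Each entry is a {queue, priority} pair; the table is -1 terminated */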
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) 	queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) 					  sizeof(*queue_priority_map), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) 	if (!queue_priority_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) 	for (i = 0; i < ecc->num_tc; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) 		queue_priority_map[i][0] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) 		queue_priority_map[i][1] = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) 	queue_priority_map[i][0] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) 	queue_priority_map[i][1] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137) 	pdata->queue_priority_mapping = queue_priority_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) 	/* Default queue has the lowest priority */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) 	pdata->default_queue = i - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) #if IS_ENABLED(CONFIG_OF)
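/*
 * Program the DMA event crossbar: each pair in the
 * "ti,edma-xbar-event-map" property routes one crossbar event to an
 * eDMA channel by updating the corresponding mux register field.
 */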
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) 			       size_t sz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) 	const char pname[] = "ti,edma-xbar-event-map";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) 	struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) 	void __iomem *xbar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) 	s16 (*xbar_chans)[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) 	size_t nelm = sz / sizeof(s16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) 	u32 shift, offset, mux;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) 	xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) 	if (!xbar_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) 	ret = of_address_to_resource(dev->of_node, 1, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) 	xbar = devm_ioremap(dev, res.start, resource_size(&res));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) 	if (!xbar)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) 	ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) 					 nelm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) 		return -EIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) 	/* Invalidate last entry for the other user of this mess */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) 	nelm >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) 	xbar_chans[nelm][0] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) 	xbar_chans[nelm][1] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) 	for (i = 0; i < nelm; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) 		shift = (xbar_chans[i][1] & 0x03) << 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) 		offset = xbar_chans[i][1] & 0xfffffffc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) 		mux = readl(xbar + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) 		mux &= ~(0xff << shift);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) 		mux |= xbar_chans[i][0] << shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) 		writel(mux, (xbar + offset));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) 	pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) 						     bool legacy_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) 	struct edma_soc_info *info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) 	struct property *prop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) 	int sz, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) 	info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) 	if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) 		return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) 	if (legacy_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) 		prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) 					&sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) 		if (prop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206) 			ret = edma_xbar_event_map(dev, info, sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) 			if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) 				return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) 		return info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) 	/* Get the list of channels allocated to be used for memcpy */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) 	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) 	if (prop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) 		const char pname[] = "ti,edma-memcpy-channels";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) 		size_t nelm = sz / sizeof(s32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) 		s32 *memcpy_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) 		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) 		if (!memcpy_ch)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) 		ret = of_property_read_u32_array(dev->of_node, pname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) 						 (u32 *)memcpy_ch, nelm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) 			return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) 		memcpy_ch[nelm] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) 		info->memcpy_channels = memcpy_ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) 	prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235) 				&sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) 	if (prop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) 		const char pname[] = "ti,edma-reserved-slot-ranges";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) 		u32 (*tmp)[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) 		s16 (*rsv_slots)[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) 		size_t nelm = sz / sizeof(*tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) 		struct edma_rsv_info *rsv_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) 		int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244) 		if (!nelm)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) 			return info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) 		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) 		if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) 		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) 		if (!rsv_info) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) 			kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) 		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) 					 GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) 		if (!rsv_slots) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) 			kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) 			return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) 		ret = of_property_read_u32_array(dev->of_node, pname,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) 						 (u32 *)tmp, nelm * 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) 			kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) 			return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) 		for (i = 0; i < nelm; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) 			rsv_slots[i][0] = tmp[i][0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) 			rsv_slots[i][1] = tmp[i][1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) 		rsv_slots[nelm][0] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) 		rsv_slots[nelm][1] = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) 		info->rsv = rsv_info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279) 		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) 		kfree(tmp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) 	return info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) 
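/*
 * Translate a DT dma-spec into a channel. A hypothetical consumer node
 * using the non-legacy (TPCC) binding, where the optional second cell
 * selects the TC, might contain:
 *
 *	dmas = <&edma 26 0>;	(channel 26, TC 0)
 *	dma-names = "tx";
 */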
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) 				      struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) 	struct edma_cc *ecc = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) 	struct dma_chan *chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) 	struct edma_chan *echan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) 	if (!ecc || dma_spec->args_count < 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) 	for (i = 0; i < ecc->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) 		echan = &ecc->slave_chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) 		if (echan->ch_num == dma_spec->args[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) 			chan = &echan->vchan.chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306) 	if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) 	if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) 	if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) 	    dma_spec->args[1] < echan->ecc->num_tc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) 		echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) 	/* The channel is going to be used as HW-synchronized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) 	echan->hw_triggered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) 	return dma_get_slave_channel(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) }
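/*
 * Consumer sketch (assuming the standard eDMA3 DT bindings): with the
 * legacy "ti,edma3" binding (#dma-cells = <1>) a client node selects a
 * channel by event number alone,
 *
 *	dmas = <&edma 20>;
 *
 * while with the "ti,edma3-tpcc" binding (#dma-cells = <2>) a second cell
 * picks the transfer controller,
 *
 *	dmas = <&edma 20 1>;	(event 20, routed through TC1)
 *
 * which is exactly what the args_count/args[1] checks above implement.
 */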
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) 						     bool legacy_mode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) 	return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) 				      struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) static bool edma_filter_fn(struct dma_chan *chan, void *param);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) static int edma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) 	struct edma_soc_info	*info = pdev->dev.platform_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) 	s8			(*queue_priority_mapping)[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) 	const s16		(*reserved)[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) 	int			i, irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) 	char			*irq_name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347) 	struct resource		*mem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) 	struct device_node	*node = pdev->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) 	struct device		*dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2350) 	struct edma_cc		*ecc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) 	bool			legacy_mode = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2354) 	if (node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2355) 		const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) 		match = of_match_node(edma_of_ids, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) 		if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) 			legacy_mode = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) 		info = edma_setup_info_from_dt(dev, legacy_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2362) 		if (IS_ERR(info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) 			dev_err(dev, "failed to get DT data\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) 			return PTR_ERR(info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) 	if (!info)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) 	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) 	if (!ecc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2379) 	ecc->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) 	ecc->id = pdev->id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2381) 	ecc->legacy_mode = legacy_mode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) 	/* When booting with DT the pdev->id is -1 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) 	if (ecc->id < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) 		ecc->id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) 	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) 	if (!mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) 		dev_dbg(dev, "mem resource not found, using index 0\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) 		mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) 		if (!mem) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) 			dev_err(dev, "no mem resource?\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) 			return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) 	ecc->base = devm_ioremap_resource(dev, mem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) 	if (IS_ERR(ecc->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) 		return PTR_ERR(ecc->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) 	platform_set_drvdata(pdev, ecc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) 	pm_runtime_enable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) 	ret = pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) 		dev_err(dev, "pm_runtime_get_sync() failed\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) 		pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) 	/* Get eDMA3 configuration from IP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) 	ret = edma_setup_from_hw(dev, info, ecc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) 		goto err_disable_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) 	/* Allocate memory based on the information we got from the IP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) 	ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) 					sizeof(*ecc->slave_chans), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418) 	ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) 				       sizeof(unsigned long), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) 	ecc->channels_mask = devm_kcalloc(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) 					   BITS_TO_LONGS(ecc->num_channels),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) 					   sizeof(unsigned long), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) 	if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) 		goto err_disable_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) 	/* Mark all channels available initially */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) 	bitmap_fill(ecc->channels_mask, ecc->num_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) 	ecc->default_queue = info->default_queue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) 	if (info->rsv) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) 		/* Set the reserved slots in inuse list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) 		reserved = info->rsv->rsv_slots;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) 		if (reserved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) 			for (i = 0; reserved[i][0] != -1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) 				bitmap_set(ecc->slot_inuse, reserved[i][0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) 					   reserved[i][1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) 		/* Clear channels not usable for Linux */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) 		reserved = info->rsv->rsv_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) 		if (reserved) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) 			for (i = 0; reserved[i][0] != -1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447) 				bitmap_clear(ecc->channels_mask, reserved[i][0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) 					     reserved[i][1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) 	}
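	/*
	 * Worked example with hypothetical values: rsv_chans =
	 * { {4, 2}, {-1, -1} } clears bits 4 and 5 in channels_mask, so
	 * DMA channels 4-5 stay invisible to Linux, while rsv_slots only
	 * marks slots as in-use so that their PaRAM contents are left
	 * untouched by the reset loop below.
	 */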
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) 	for (i = 0; i < ecc->num_slots; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) 		/* Reset only unused (not reserved) PaRAM slots */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454) 		if (!test_bit(i, ecc->slot_inuse))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) 			edma_write_slot(ecc, i, &dummy_paramset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) 	irq = platform_get_irq_byname(pdev, "edma3_ccint");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) 	if (irq < 0 && node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460) 		irq = irq_of_parse_and_map(node, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) 	if (irq >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) 		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) 					  dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) 		ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466) 				       ecc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) 			dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) 			goto err_disable_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) 		ecc->ccint = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) 	irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475) 	if (irq < 0 && node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) 		irq = irq_of_parse_and_map(node, 2);
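	/*
	 * Note: DT interrupt index 1 is skipped on purpose; in the eDMA3
	 * binding the interrupt order is ccint, mperr, ccerrint, and the
	 * memory-protection error interrupt is not handled by this driver.
	 */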
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478) 	if (irq >= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) 		irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) 					  dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481) 		ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) 				       ecc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484) 			dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) 			goto err_disable_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) 		ecc->ccerrint = irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) 	ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) 	if (ecc->dummy_slot < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) 		dev_err(dev, "Can't allocate PaRAM dummy slot\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493) 		ret = ecc->dummy_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) 		goto err_disable_pm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) 	queue_priority_mapping = info->queue_priority_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) 	if (!ecc->legacy_mode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) 		int lowest_priority = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) 		unsigned int array_max;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) 		struct of_phandle_args tc_args;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) 		ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) 					    sizeof(*ecc->tc_list), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) 		if (!ecc->tc_list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507) 			ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) 			goto err_reg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) 		for (i = 0;; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) 			ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) 							       1, i, &tc_args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514) 			if (ret || i == ecc->num_tc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) 			ecc->tc_list[i].node = tc_args.np;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) 			ecc->tc_list[i].id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) 			queue_priority_mapping[i][1] = tc_args.args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520) 			if (queue_priority_mapping[i][1] > lowest_priority) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) 				lowest_priority = queue_priority_mapping[i][1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) 				info->default_queue = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) 		/* See if we have optional dma-channel-mask array */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527) 		array_max = DIV_ROUND_UP(ecc->num_channels, BITS_PER_TYPE(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) 		ret = of_property_read_variable_u32_array(node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529) 						"dma-channel-mask",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) 						(u32 *)ecc->channels_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) 						1, array_max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) 		if (ret > 0 && ret != array_max)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) 			dev_warn(dev, "dma-channel-mask is not complete.\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) 		else if (ret == -EOVERFLOW || ret == -ENODATA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) 			dev_warn(dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) 				 "dma-channel-mask is out of range or empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) 	}
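	/*
	 * Illustrative DT fragment for the mask read above, assuming 64
	 * channels (i.e. array_max == 2):
	 *
	 *	dma-channel-mask = <0xffffffff 0x00ffffff>;
	 *
	 * Each set bit marks a channel as usable by Linux; here channels
	 * 56-63 would remain masked out.
	 */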
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) 	/* Event queue priority mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) 	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) 		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) 					      queue_priority_mapping[i][1]);
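	/*
	 * Format sketch with hypothetical values: queue_priority_mapping =
	 * { {0, 3}, {1, 7}, {-1, -1} } assigns priority 3 to event queue 0
	 * and priority 7 to queue 1; {-1, -1} terminates the list, and a
	 * numerically higher value means a lower priority (cf. the
	 * lowest_priority tracking above).
	 */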
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) 	edma_write_array2(ecc, EDMA_DRAE, 0, 0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) 	edma_write_array2(ecc, EDMA_DRAE, 0, 1, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) 	edma_write_array(ecc, EDMA_QRAE, 0, 0x0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) 	ecc->info = info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) 	/* Init the dma device and channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) 	edma_dma_init(ecc, legacy_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) 	for (i = 0; i < ecc->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) 		/* Do not touch reserved channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) 		if (!test_bit(i, ecc->channels_mask))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558) 		/* Assign all channels to the default queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) 		edma_assign_channel_eventq(&ecc->slave_chans[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) 					   info->default_queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) 		/* Set entry slot to the dummy slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) 		edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) 	ecc->dma_slave.filter.map = info->slave_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) 	ecc->dma_slave.filter.mapcnt = info->slavecnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) 	ecc->dma_slave.filter.fn = edma_filter_fn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) 	ret = dma_async_device_register(&ecc->dma_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) 		dev_err(dev, "slave ddev registration failed (%d)\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) 		goto err_reg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) 	if (ecc->dma_memcpy) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) 		ret = dma_async_device_register(ecc->dma_memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) 			dev_err(dev, "memcpy ddev registration failed (%d)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) 				ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580) 			dma_async_device_unregister(&ecc->dma_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) 			goto err_reg1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585) 	if (node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) 		of_dma_controller_register(node, of_edma_xlate, ecc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588) 	dev_info(dev, "TI EDMA DMA engine driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) err_reg1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) 	edma_free_slot(ecc, ecc->dummy_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) err_disable_pm:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595) 	pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) 	pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) static void edma_cleanup_vchan(struct dma_device *dmadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) 	struct edma_chan *echan, *_echan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) 	list_for_each_entry_safe(echan, _echan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605) 			&dmadev->channels, vchan.chan.device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) 		list_del(&echan->vchan.chan.device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) 		tasklet_kill(&echan->vchan.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) static int edma_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) 	struct device *dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) 	struct edma_cc *ecc = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) 	devm_free_irq(dev, ecc->ccint, ecc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) 	devm_free_irq(dev, ecc->ccerrint, ecc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) 	edma_cleanup_vchan(&ecc->dma_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) 	if (dev->of_node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) 		of_dma_controller_free(dev->of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623) 	dma_async_device_unregister(&ecc->dma_slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) 	if (ecc->dma_memcpy)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) 		dma_async_device_unregister(ecc->dma_memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) 	edma_free_slot(ecc, ecc->dummy_slot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627) 	pm_runtime_put_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) 	pm_runtime_disable(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2630) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) #ifdef CONFIG_PM_SLEEP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) static int edma_pm_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) 	struct edma_cc *ecc = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637) 	struct edma_chan *echan = ecc->slave_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) 	for (i = 0; i < ecc->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) 		if (echan[i].alloced)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642) 			edma_setup_interrupt(&echan[i], false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) static int edma_pm_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) 	struct edma_cc *ecc = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) 	struct edma_chan *echan = ecc->slave_chans;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2652) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2653) 	s8 (*queue_priority_mapping)[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) 	/* Re-initialize the dummy slot to the dummy param set */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2656) 	edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2658) 	queue_priority_mapping = ecc->info->queue_priority_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2660) 	/* Event queue priority mapping */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) 	for (i = 0; queue_priority_mapping[i][0] != -1; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) 		edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2663) 					      queue_priority_mapping[i][1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2664) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) 	for (i = 0; i < ecc->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) 		if (echan[i].alloced) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) 			/* ensure access through shadow region 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) 			edma_or_array2(ecc, EDMA_DRAE, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) 				       EDMA_REG_ARRAY_INDEX(i),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) 				       EDMA_CHANNEL_BIT(i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) 			edma_setup_interrupt(&echan[i], true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) 			/* Set up channel -> slot mapping for the entry slot */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) 			edma_set_chmap(&echan[i], echan[i].slot[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) static const struct dev_pm_ops edma_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) 	SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) static struct platform_driver edma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) 	.probe		= edma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) 	.remove		= edma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) 		.name	= "edma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) 		.pm	= &edma_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) 		.of_match_table = edma_of_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) 
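/*
 * The TPTC driver below is intentionally a stub: its probe only takes a
 * runtime PM reference so that the transfer controllers stay powered while
 * the channel controller (edma_driver above) uses them; it has no remove
 * callback or data path of its own.
 */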
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) static int edma_tptc_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) 	pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) 	return pm_runtime_get_sync(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) static struct platform_driver edma_tptc_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) 	.probe		= edma_tptc_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) 		.name	= "edma3-tptc",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) 		.of_match_table = edma_tptc_of_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) static bool edma_filter_fn(struct dma_chan *chan, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) 	bool match = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) 	if (chan->device->dev->driver == &edma_driver.driver) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) 		struct edma_chan *echan = to_edma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) 		unsigned ch_req = *(unsigned *)param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) 		if (ch_req == echan->ch_num) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) 			/* The channel is going to be used as HW-synchronized */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) 			echan->hw_triggered = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) 			match = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724) 	return match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) }
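/*
 * Usage sketch for legacy (non-DT) clients, with EDMA_EVT standing in for
 * a board-specific event number: a slave channel is requested through the
 * dmaengine core using this filter, e.g.
 *
 *	dma_cap_mask_t mask;
 *	unsigned int evt = EDMA_EVT;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &evt);
 *
 * On a match, edma_filter_fn() also flags the channel as HW-triggered,
 * as the assignment above shows.
 */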
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2726) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2727) static int edma_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) 	ret = platform_driver_register(&edma_tptc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) 	return platform_driver_register(&edma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) subsys_initcall(edma_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) static void __exit edma_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) 	platform_driver_unregister(&edma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742) 	platform_driver_unregister(&edma_tptc_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) module_exit(edma_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) MODULE_DESCRIPTION("TI EDMA DMA engine driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) MODULE_LICENSE("GPL v2");