Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config);

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: true when this descriptor has completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields that follow
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before a slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register
 * @slave_config: slave configuration
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the necessary channel configuration information. For memcpy channels this
 * must be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields that follow */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
	struct dma_slave_config		slave_config;
};

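/*
 * For illustration only - the names below are hypothetical and not part of
 * this driver. A slave client typically obtains a channel with a filter
 * function that stores its struct ep93xx_dma_data in chan->private:
 *
 *	static bool my_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		chan->private = filter_param;
 *		return true;
 *	}
 *
 *	chan = dma_request_channel(mask, my_filter, &my_dma_data);
 */
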
/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to the current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handles the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with the
 * channel lock held and interrupts disabled, so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

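/* Device to use for channel diagnostics (dev_warn(), dev_err() and friends) */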
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

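/* Convert a generic dmaengine channel to the EP93xx channel embedding it */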
static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if there are still descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * The EP93xx User's Guide states that we must perform a dummy read
	 * after a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

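/*
 * Point the channel at the requested peripheral port and enable it with
 * channel error interrupt reporting. Transfers start from buffer 0.
 */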
static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}

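/* Current channel state (one of the M2P_STATE_* values) from the status register */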
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

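/*
 * Disable the buffer interrupts and wait until the channel has left the
 * ON/NEXT states, so a terminated transfer is known to have actually stopped.
 */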
static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}

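/*
 * Program the next active descriptor into the idle half of the double
 * buffer (MAXCNT/BASE pair 0 or 1) and flip the buffer selector.
 */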
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

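/*
 * Fill the first buffer and enable the stall interrupt; if more descriptors
 * are ready, fill the second buffer too and also enable the NFB (next frame
 * buffer) interrupt.
 */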
static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client, so we just report the error here and
		 * continue as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, based
	 * on the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found by experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer,
		 * which leads to problems since we don't get the DONE
		 * interrupt then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

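/*
 * Program the next active descriptor into the idle half of the double
 * buffer (SAR/DAR/BCR register set 0 or 1) and flip the buffer selector.
 */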
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

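/*
 * Program one buffer (or two, when more descriptors are ready), enable the
 * DONE (and possibly NFB) interrupt and enable the channel. Memcpy channels
 * additionally need a software trigger to start the transfer.
 */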
static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channels this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes the EP93xx M2M DMA asserts the DONE interrupt
 * while the DMA channel is still running (channel Buffer FSM in DMA_BUF_ON
 * state, and channel Control FSM in DMA_MEM_RD state, observed at least in
 * IDE-DMA operation). In effect, disabling the channel when only the DONE
 * bit is set could stop a currently running DMA transfer. To avoid this, we
 * use the Buffer FSM and Control FSM to check the current state of the DMA
 * channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with the DMA channel state, determines the action to take in the
	 * interrupt handler.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the M2M DMA Buffer FSM and Control FSM to check the current
	 * state of the DMA channel. Using the DONE and NFB bits from the
	 * channel status register or bits from the channel interrupt
	 * register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when the Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the Buffer FSM is in DMA_NO_BUF state
	 * and the Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

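/*
 * Take the first acked descriptor from the channel's free list and reset it
 * for reuse. Returns NULL when no reusable descriptor is available.
 */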
static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

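/* Return @desc together with its chained descriptors to the free list */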
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

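/*
 * Bottom half: completes the cookie of the finished chain (non-cyclic case
 * only), starts the next queued transaction, releases the finished
 * descriptors and finally invokes the client callback.
 */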
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) static void ep93xx_dma_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	struct ep93xx_dma_desc *desc, *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	struct dmaengine_desc_callback cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	memset(&cb, 0, sizeof(cb));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	spin_lock_irq(&edmac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	 * If dma_terminate_all() was called before we get to run, the active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	 * list has become empty. If that happens we aren't supposed to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	 * anything more than call ep93xx_dma_advance_work().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	desc = ep93xx_dma_get_active(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	if (desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		if (desc->complete) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 			/* mark descriptor complete for non-cyclic case only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 				dma_cookie_complete(&desc->txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 			list_splice_init(&edmac->active, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 		dmaengine_desc_get_callback(&desc->txd, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	spin_unlock_irq(&edmac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	/* Pick up the next descriptor from the queue */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	ep93xx_dma_advance_work(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 	/* Now we can release all the chained descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	list_for_each_entry_safe(desc, d, &list, node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 		dma_descriptor_unmap(&desc->txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 		ep93xx_dma_desc_put(edmac, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 	dmaengine_desc_callback_invoke(&cb, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	struct ep93xx_dma_chan *edmac = dev_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	struct ep93xx_dma_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	irqreturn_t ret = IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	spin_lock(&edmac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	desc = ep93xx_dma_get_active(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 		dev_warn(chan2dev(edmac),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 			 "got interrupt while active list is empty\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		spin_unlock(&edmac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 		return IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	switch (edmac->edma->hw_interrupt(edmac)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	case INTERRUPT_DONE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 		desc->complete = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		tasklet_schedule(&edmac->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	case INTERRUPT_NEXT_BUFFER:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 			tasklet_schedule(&edmac->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		ret = IRQ_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	spin_unlock(&edmac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * @tx: descriptor to be executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827)  * Function will execute the given descriptor on the hardware or, if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828)  * hardware is busy, queue the descriptor to be executed later on. Returns a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829)  * cookie which can be used to poll the status of the descriptor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	struct ep93xx_dma_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	dma_cookie_t cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	spin_lock_irqsave(&edmac->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	cookie = dma_cookie_assign(tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	desc = container_of(tx, struct ep93xx_dma_desc, txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	 * If nothing is currently being processed, we push this descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	 * directly to the hardware. Otherwise we put the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	 * on the pending queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	if (list_empty(&edmac->active)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		ep93xx_dma_set_active(edmac, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		edmac->edma->hw_submit(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		list_add_tail(&desc->node, &edmac->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	spin_unlock_irqrestore(&edmac->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	return cookie;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
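/*
 * A minimal, hypothetical client sketch for the submit path above: a
 * descriptor obtained from one of the prep_*() helpers below is handed
 * back with dmaengine_submit() (which lands here via ->tx_submit) and
 * flushed with dma_async_issue_pending(). All names are illustrative.
 */
static dma_cookie_t example_submit(struct dma_chan *chan,
				   struct dma_async_tx_descriptor *txd)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return cookie;

	/* Push any descriptors queued behind a busy channel to hardware */
	dma_async_issue_pending(chan);
	return cookie;
}
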
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  * @chan: channel to allocate resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  * Function allocates the necessary resources for the given DMA channel and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * returns the number of allocated descriptors for the channel. A negative
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  * errno is returned in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	struct ep93xx_dma_data *data = chan->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	const char *name = dma_chan_name(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	/* Sanity check the channel parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 	if (!edmac->edma->m2m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		if (!data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		if (data->port < EP93XX_DMA_I2S1 ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 		    data->port > EP93XX_DMA_IRDA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		if (data->direction != ep93xx_dma_chan_direction(chan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		if (data) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			switch (data->port) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			case EP93XX_DMA_SSP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 			case EP93XX_DMA_IDE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 				if (!is_slave_direction(data->direction))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 					return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 			default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 	if (data && data->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 		name = data->name;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	ret = clk_enable(edmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 		goto fail_clk_disable;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	spin_lock_irq(&edmac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	dma_cookie_init(&edmac->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	ret = edmac->edma->hw_setup(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	spin_unlock_irq(&edmac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		goto fail_free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		struct ep93xx_dma_desc *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 			dev_warn(chan2dev(edmac), "not enough descriptors\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 		INIT_LIST_HEAD(&desc->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		dma_async_tx_descriptor_init(&desc->txd, chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		desc->txd.flags = DMA_CTRL_ACK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		desc->txd.tx_submit = ep93xx_dma_tx_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		ep93xx_dma_desc_put(edmac, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) fail_free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	free_irq(edmac->irq, edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) fail_clk_disable:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	clk_disable(edmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 
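/*
 * A hypothetical sketch of how an EP93xx peripheral driver satisfies the
 * parameter checks above: chan->private must point to a struct
 * ep93xx_dma_data whose port and direction match the channel. Modelled
 * on the in-tree EP93xx clients; the chosen port, direction and names
 * are illustrative only.
 */
static bool example_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_data *data = filter_param;

	if (data->direction != ep93xx_dma_chan_direction(chan))
		return false;

	chan->private = data;
	return true;
}

static struct dma_chan *example_request_rx_channel(void)
{
	static struct ep93xx_dma_data dma_data = {
		.port		= EP93XX_DMA_AAC1,
		.direction	= DMA_DEV_TO_MEM,
		.name		= "aac1-rx",
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_dma_filter, &dma_data);
}
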
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945)  * ep93xx_dma_free_chan_resources - release resources for the channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946)  * @chan: channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  * Function releases all the resources allocated for the given channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  * The channel must be idle when this is called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	struct ep93xx_dma_desc *desc, *d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	BUG_ON(!list_empty(&edmac->active));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	BUG_ON(!list_empty(&edmac->queue));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	spin_lock_irqsave(&edmac->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 	edmac->edma->hw_shutdown(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	edmac->runtime_addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	edmac->runtime_ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	edmac->buffer = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	list_splice_init(&edmac->free_list, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	spin_unlock_irqrestore(&edmac->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	list_for_each_entry_safe(desc, d, &list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 		kfree(desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 	clk_disable(edmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	free_irq(edmac->irq, edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977)  * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978)  * @chan: channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979)  * @dest: destination bus address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980)  * @src: source bus address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981)  * @len: size of the transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982)  * @flags: flags for the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984)  * Returns a valid DMA descriptor or %NULL in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 			   dma_addr_t src, size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	struct ep93xx_dma_desc *desc, *first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	size_t bytes, offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	for (offset = 0; offset < len; offset += bytes) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		desc = ep93xx_dma_desc_get(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 		if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		desc->src_addr = src + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		desc->dst_addr = dest + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		desc->size = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 			list_add_tail(&desc->node, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	first->txd.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	first->txd.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	return &first->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	ep93xx_dma_desc_put(edmac, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
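/*
 * A minimal sketch (illustrative names, no cleanup) of driving the M2M
 * memcpy path above through the generic dmaengine client API. Transfers
 * longer than DMA_MAX_CHAN_BYTES are transparently split into a chain of
 * descriptors by the loop above.
 */
static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}
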
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)  * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  * @chan: channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)  * @sgl: list of buffers to transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027)  * @sg_len: number of entries in @sgl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * @dir: direction of the DMA transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)  * @flags: flags for the descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)  * @context: operation context (ignored)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)  * Returns a valid DMA descriptor or %NULL in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 			 unsigned int sg_len, enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 			 unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	struct ep93xx_dma_desc *desc, *first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		dev_warn(chan2dev(edmac),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			 "channel was configured with different direction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		dev_warn(chan2dev(edmac),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			 "channel is already used for cyclic transfers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 		size_t len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		if (len > DMA_MAX_CHAN_BYTES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 				 len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 		desc = ep93xx_dma_desc_get(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 		if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			desc->src_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 			desc->dst_addr = edmac->runtime_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 			desc->src_addr = edmac->runtime_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 			desc->dst_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 		desc->size = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 			first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 			list_add_tail(&desc->node, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	first->txd.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	first->txd.flags = flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	return &first->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 	ep93xx_dma_desc_put(edmac, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
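/*
 * A minimal slave-transfer sketch, assuming an already DMA-mapped
 * scatterlist and an M2M slave channel (SSP/IDE) where the device
 * address and bus width from dma_slave_config are actually consumed
 * (see ep93xx_dma_slave_config_write() below). fifo_addr and the
 * 16-bit width are placeholders.
 */
static int example_slave_tx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.dst_addr	= fifo_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
	};
	struct dma_async_tx_descriptor *txd;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
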
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  * @chan: channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  * @dma_addr: DMA mapped address of the buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * @buf_len: length of the buffer (in bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  * @period_len: length of a single period
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  * @dir: direction of the operation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  * @flags: tx descriptor status flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  * Prepares a descriptor for a cyclic DMA operation. This means that once the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  * descriptor is submitted, we will keep submitting @period_len sized
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110)  * buffers and calling the callback once a period has elapsed. The transfer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)  * terminates only when the client calls dmaengine_terminate_all() for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112)  * channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)  * Returns a valid DMA descriptor or %NULL in case of failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 			   size_t buf_len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 			   enum dma_transfer_direction dir, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	struct ep93xx_dma_desc *desc, *first;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	size_t offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 		dev_warn(chan2dev(edmac),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 			 "channel was configured with different direction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		dev_warn(chan2dev(edmac),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 			 "channel is already used for cyclic transfers\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	if (period_len > DMA_MAX_CHAN_BYTES) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		dev_warn(chan2dev(edmac), "too big period length %zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			 period_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	/* Split the buffer into period size chunks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	first = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	for (offset = 0; offset < buf_len; offset += period_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		desc = ep93xx_dma_desc_get(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		if (!desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 			goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		if (dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			desc->src_addr = dma_addr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			desc->dst_addr = edmac->runtime_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 			desc->src_addr = edmac->runtime_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			desc->dst_addr = dma_addr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 		desc->size = period_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		if (!first)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			first = desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 			list_add_tail(&desc->node, &first->tx_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	first->txd.cookie = -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	return &first->txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	ep93xx_dma_desc_put(edmac, first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
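/*
 * A sketch of the cyclic case as an audio-style client might drive it:
 * one DMA-mapped buffer split into @period_len chunks, with the callback
 * firing after every completed period until the channel is terminated.
 * The helper name, buffer and callback are illustrative.
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = period_done;
	txd->callback_param = arg;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
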
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180)  * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181)  * current context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182)  * @chan: channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184)  * Synchronizes the DMA channel termination to the current context. When this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185)  * function returns it is guaranteed that all transfers for previously issued
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186)  * descriptors have stopped and it is safe to free the memory associated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)  * with them. Furthermore it is guaranteed that all complete callback functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)  * for a previously submitted descriptor have finished running and it is safe to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189)  * free resources accessed from within the complete callbacks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) static void ep93xx_dma_synchronize(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	if (edmac->edma->hw_synchronize)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		edmac->edma->hw_synchronize(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200)  * ep93xx_dma_terminate_all - terminate all transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)  * @chan: channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)  * Stops all DMA transactions. All descriptors are put back on the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204)  * @edmac->free_list and callbacks are _not_ called.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static int ep93xx_dma_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	struct ep93xx_dma_desc *desc, *_d;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	LIST_HEAD(list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	spin_lock_irqsave(&edmac->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	/* First we disable and flush the DMA channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	edmac->edma->hw_shutdown(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 	list_splice_init(&edmac->active, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	list_splice_init(&edmac->queue, &list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	 * We then re-enable the channel. This way we can continue submitting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	 * the descriptors by just calling ->hw_submit() again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	edmac->edma->hw_setup(edmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 	spin_unlock_irqrestore(&edmac->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 	list_for_each_entry_safe(desc, _d, &list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		ep93xx_dma_desc_put(edmac, desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 
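/*
 * Teardown sketch (illustrative helper name): callbacks are not invoked
 * for terminated descriptors, but the tasklet may still be running, so
 * clients should use dmaengine_terminate_sync(), which pairs
 * ->device_terminate_all() with ->device_synchronize() above, before
 * freeing buffers or releasing the channel.
 */
static void example_channel_stop(struct dma_chan *chan)
{
	dmaengine_terminate_sync(chan);
	dma_release_channel(chan);
}
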
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) static int ep93xx_dma_slave_config(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 				   struct dma_slave_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	memcpy(&edmac->slave_config, config, sizeof(*config));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 					 enum dma_transfer_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 					 struct dma_slave_config *config)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	enum dma_slave_buswidth width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	u32 addr, ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	if (!edmac->edma->m2m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 	switch (dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	case DMA_DEV_TO_MEM:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 		width = config->src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 		addr = config->src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	case DMA_MEM_TO_DEV:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 		width = config->dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		addr = config->dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 	switch (width) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 	case DMA_SLAVE_BUSWIDTH_1_BYTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		ctrl = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	case DMA_SLAVE_BUSWIDTH_2_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 		ctrl = M2M_CONTROL_PW_16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	case DMA_SLAVE_BUSWIDTH_4_BYTES:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 		ctrl = M2M_CONTROL_PW_32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	spin_lock_irqsave(&edmac->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	edmac->runtime_addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	edmac->runtime_ctrl = ctrl;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	spin_unlock_irqrestore(&edmac->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292)  * ep93xx_dma_tx_status - check if a transaction is completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)  * @chan: channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294)  * @cookie: transaction specific cookie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295)  * @state: state of the transaction is stored here if given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297)  * This function can be used to query the state of a given transaction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 					    dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 					    struct dma_tx_state *state)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	return dma_cookie_status(chan, cookie, state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
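/*
 * Polling sketch: the cookie returned at submit time can be queried via
 * dma_async_is_tx_complete(), which ends up here through
 * ->device_tx_status. This driver reports no residue, so only the
 * completion state is meaningful. The helper name is illustrative.
 */
static bool example_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
	return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_COMPLETE;
}
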
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)  * ep93xx_dma_issue_pending - push pending transactions to the hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)  * @chan: channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310)  * When this function is called, all pending transactions are pushed to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311)  * hardware and executed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) static void ep93xx_dma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) static int __init ep93xx_dma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 	struct ep93xx_dma_engine *edma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	struct dma_device *dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 	size_t edma_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	if (!edma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	dma_dev = &edma->dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	edma->m2m = platform_get_device_id(pdev)->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	edma->num_channels = pdata->num_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	INIT_LIST_HEAD(&dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	for (i = 0; i < pdata->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 		struct ep93xx_dma_chan *edmac = &edma->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 		edmac->chan.device = dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 		edmac->regs = cdata->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 		edmac->irq = cdata->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 		edmac->edma = edma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 		edmac->clk = clk_get(NULL, cdata->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 		if (IS_ERR(edmac->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 			dev_warn(&pdev->dev, "failed to get clock for %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 				 cdata->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 		spin_lock_init(&edmac->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 		INIT_LIST_HEAD(&edmac->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 		INIT_LIST_HEAD(&edmac->queue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		INIT_LIST_HEAD(&edmac->free_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		list_add_tail(&edmac->chan.device_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 			      &dma_dev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	dma_cap_zero(dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	dma_dev->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	dma_dev->device_config = ep93xx_dma_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	dma_dev->device_synchronize = ep93xx_dma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	dma_dev->device_tx_status = ep93xx_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	if (edma->m2m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		edma->hw_setup = m2m_hw_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		edma->hw_shutdown = m2m_hw_shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 		edma->hw_submit = m2m_hw_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		edma->hw_interrupt = m2m_hw_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		edma->hw_synchronize = m2p_hw_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		edma->hw_setup = m2p_hw_setup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		edma->hw_shutdown = m2p_hw_shutdown;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		edma->hw_submit = m2p_hw_submit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		edma->hw_interrupt = m2p_hw_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	ret = dma_async_device_register(dma_dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 	if (unlikely(ret)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 		for (i = 0; i < edma->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 			struct ep93xx_dma_chan *edmac = &edma->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 			if (!IS_ERR_OR_NULL(edmac->clk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 				clk_put(edmac->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		kfree(edma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			 edma->m2m ? "M" : "P");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) static const struct platform_device_id ep93xx_dma_driver_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	{ "ep93xx-dma-m2p", 0 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	{ "ep93xx-dma-m2m", 1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 	{ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 
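/*
 * The driver_data above selects the M2P (0) or M2M (1) flavour. Probe
 * expects board code to register these platform devices with an
 * ep93xx_dma_platform_data describing each channel, as done in
 * arch/arm/mach-ep93xx/dma.c. A trimmed, hypothetical shape:
 */
static struct ep93xx_dma_chan_data example_m2p_channels[] = {
	/* base and irq are board specific; these values are placeholders */
	{ .name = "m2p0", .base = NULL, .irq = 7 },
	{ .name = "m2p1", .base = NULL, .irq = 8 },
};

static struct ep93xx_dma_platform_data example_m2p_pdata = {
	.channels	= example_m2p_channels,
	.num_channels	= ARRAY_SIZE(example_m2p_channels),
};
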
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) static struct platform_driver ep93xx_dma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 		.name	= "ep93xx-dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	.id_table	= ep93xx_dma_driver_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) static int __init ep93xx_dma_module_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) 	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) subsys_initcall(ep93xx_dma_module_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) MODULE_DESCRIPTION("EP93xx DMA driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) MODULE_LICENSE("GPL");