Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * S3C24XX DMA handling
 *
 * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
 *
 * based on amba-pl08x.c
 *
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
 * that can be routed to any of the 4 to 8 hardware channels.
 *
 * Therefore, on these DMA controllers the number of channels and the number
 * of incoming DMA signals are two entirely different things. It is usually
 * not possible to service all physical signals at once, so a multiplexing
 * scheme with possible denial of use is necessary.
 *
 * Open items:
 * - bursts
 */

#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-s3c24xx.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define MAX_DMA_CHANNELS	8

#define S3C24XX_DISRC			0x00
#define S3C24XX_DISRCC			0x04
#define S3C24XX_DISRCC_INC_INCREMENT	0
#define S3C24XX_DISRCC_INC_FIXED	BIT(0)
#define S3C24XX_DISRCC_LOC_AHB		0
#define S3C24XX_DISRCC_LOC_APB		BIT(1)

#define S3C24XX_DIDST			0x08
#define S3C24XX_DIDSTC			0x0c
#define S3C24XX_DIDSTC_INC_INCREMENT	0
#define S3C24XX_DIDSTC_INC_FIXED	BIT(0)
#define S3C24XX_DIDSTC_LOC_AHB		0
#define S3C24XX_DIDSTC_LOC_APB		BIT(1)
#define S3C24XX_DIDSTC_INT_TC0		0
#define S3C24XX_DIDSTC_INT_RELOAD	BIT(2)

#define S3C24XX_DCON			0x10

#define S3C24XX_DCON_TC_MASK		0xfffff
#define S3C24XX_DCON_DSZ_BYTE		(0 << 20)
#define S3C24XX_DCON_DSZ_HALFWORD	(1 << 20)
#define S3C24XX_DCON_DSZ_WORD		(2 << 20)
#define S3C24XX_DCON_DSZ_MASK		(3 << 20)
#define S3C24XX_DCON_DSZ_SHIFT		20
#define S3C24XX_DCON_AUTORELOAD		0
#define S3C24XX_DCON_NORELOAD		BIT(22)
#define S3C24XX_DCON_HWTRIG		BIT(23)
#define S3C24XX_DCON_HWSRC_SHIFT	24
#define S3C24XX_DCON_SERV_SINGLE	0
#define S3C24XX_DCON_SERV_WHOLE		BIT(27)
#define S3C24XX_DCON_TSZ_UNIT		0
#define S3C24XX_DCON_TSZ_BURST4		BIT(28)
#define S3C24XX_DCON_INT		BIT(29)
#define S3C24XX_DCON_SYNC_PCLK		0
#define S3C24XX_DCON_SYNC_HCLK		BIT(30)
#define S3C24XX_DCON_DEMAND		0
#define S3C24XX_DCON_HANDSHAKE		BIT(31)
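
/*
 * For example, s3c24xx_dma_start_next_sg() below composes the DCON value
 * for a hardware-triggered, word-wide transfer of 512 bytes as
 *
 *	S3C24XX_DCON_DSZ_WORD | (512 / 4) | S3C24XX_DCON_HWTRIG | ...
 *
 * i.e. the TC field counts transfer units of the configured width,
 * not bytes.
 */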

#define S3C24XX_DSTAT			0x14
#define S3C24XX_DSTAT_STAT_BUSY		BIT(20)
#define S3C24XX_DSTAT_CURRTC_MASK	0xfffff

#define S3C24XX_DMASKTRIG		0x20
#define S3C24XX_DMASKTRIG_SWTRIG	BIT(0)
#define S3C24XX_DMASKTRIG_ON		BIT(1)
#define S3C24XX_DMASKTRIG_STOP		BIT(2)

#define S3C24XX_DMAREQSEL		0x24
#define S3C24XX_DMAREQSEL_HW		BIT(0)

/*
 * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel
 * for a DMA source. Instead only specific channels are valid.
 * All of these SoCs have 4 physical channels and the number of request
 * source bits is 3. Additionally we also need 1 bit to mark the channel
 * as valid.
 * Therefore we separate the chansel element of the channel data into 4
 * parts of 4 bits each, to hold whether the channel is valid and which
 * hw request source to use.
 *
 * Example:
 * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
 * For it the chansel field would look like
 *
 * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
 * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
 * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
 */
#define S3C24XX_CHANSEL_WIDTH		4
#define S3C24XX_CHANSEL_VALID		BIT(3)
#define S3C24XX_CHANSEL_REQ_MASK	7
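
/*
 * Decoding the SDI example above for physical channel 2:
 *
 *	(chansel >> (2 * S3C24XX_CHANSEL_WIDTH)) & S3C24XX_CHANSEL_VALID
 *
 * is non-zero, so the channel may be used, and
 *
 *	(chansel >> (2 * S3C24XX_CHANSEL_WIDTH)) & S3C24XX_CHANSEL_REQ_MASK
 *
 * yields request source 2; see s3c24xx_dma_phy_valid() and
 * s3c24xx_dma_start_next_sg() below.
 */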

/*
 * struct soc_data - vendor-specific config parameters for individual SoCs
 * @stride: spacing between the registers of each channel
 * @has_reqsel: does the controller use the newer request-selection mechanism
 * @has_clocks: are controllable DMA clocks present
 */
struct soc_data {
	int stride;
	bool has_reqsel;
	bool has_clocks;
};
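
/*
 * Assuming the usual probe layout for this driver (not shown in this
 * excerpt), the register bank of physical channel i is expected at
 * base + i * stride.
 */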

/*
 * enum s3c24xx_dma_chan_state - holds the virtual channel states
 * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
 * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum s3c24xx_dma_chan_state {
	S3C24XX_DMA_CHAN_IDLE,
	S3C24XX_DMA_CHAN_RUNNING,
	S3C24XX_DMA_CHAN_WAITING,
};

/*
 * struct s3c24xx_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct s3c24xx_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/*
 * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
 * @vd: virtual DMA descriptor
 * @dsg_list: list of child sgs
 * @at: sg currently being transferred
 * @width: transfer width
 * @disrcc: value for source control register
 * @didstc: value for destination control register
 * @dcon: base value for dcon register
 * @cyclic: indicate cyclic transfer
 */
struct s3c24xx_txd {
	struct virt_dma_desc vd;
	struct list_head dsg_list;
	struct list_head *at;
	u8 width;
	u32 disrcc;
	u32 didstc;
	u32 dcon;
	bool cyclic;
};

struct s3c24xx_dma_chan;

/*
 * struct s3c24xx_dma_phy - holder for the physical channels
 * @id: physical index to this channel
 * @valid: does the channel have all required elements
 * @base: virtual memory base (remapped) for this channel
 * @irq: interrupt for this channel
 * @clk: clock for this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: virtual channel currently being served by this physical channel
 * @host: a pointer to the host (internal use)
 */
struct s3c24xx_dma_phy {
	unsigned int			id;
	bool				valid;
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	spinlock_t			lock;
	struct s3c24xx_dma_chan		*serving;
	struct s3c24xx_dma_engine	*host;
};

/*
 * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
 * @id: the id of the channel
 * @name: name of the channel
 * @vc: wrapped virtual channel
 * @phy: the physical channel utilized by this channel, if there is one
 * @cfg: slave configuration set at runtime
 * @at: active transaction on this channel
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 */
struct s3c24xx_dma_chan {
	int id;
	const char *name;
	struct virt_dma_chan vc;
	struct s3c24xx_dma_phy *phy;
	struct dma_slave_config cfg;
	struct s3c24xx_txd *at;
	struct s3c24xx_dma_engine *host;
	enum s3c24xx_dma_chan_state state;
	bool slave;
};

/*
 * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
 * DMA engine
 * @pdev: the corresponding platform device
 * @pdata: platform data passed in from the platform/machine
 * @sdata: vendor-specific SoC configuration data
 * @base: virtual memory base (remapped)
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @phy_chans: array of data for the physical channels
 */
struct s3c24xx_dma_engine {
	struct platform_device			*pdev;
	const struct s3c24xx_dma_platdata	*pdata;
	struct soc_data				*sdata;
	void __iomem				*base;
	struct dma_device			slave;
	struct dma_device			memcpy;
	struct s3c24xx_dma_phy			*phy_chans;
};

/*
 * Physical channel handling
 */

/*
 * Check whether a certain channel is busy or not.
 */
static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
{
	unsigned int val = readl(phy->base + S3C24XX_DSTAT);
	return val & S3C24XX_DSTAT_STAT_BUSY;
}

static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
				  struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
	int phyvalid;

	/* every phy is valid for memcpy channels */
	if (!s3cchan->slave)
		return true;

	/* On newer variants all phys can be used for all virtual channels */
	if (s3cdma->sdata->has_reqsel)
		return true;

	phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
	return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer. If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static
struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy = NULL;
	unsigned long flags;
	int i;
	int ret;

	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
		phy = &s3cdma->phy_chans[i];

		if (!phy->valid)
			continue;

		if (!s3c24xx_dma_phy_valid(s3cchan, phy))
			continue;

		spin_lock_irqsave(&phy->lock, flags);

		if (!phy->serving) {
			phy->serving = s3cchan;
			spin_unlock_irqrestore(&phy->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&phy->lock, flags);
	}

	/* No physical channel available, cope with it */
	if (i == s3cdma->pdata->num_phy_channels) {
		dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
		return NULL;
	}

	/* start the phy clock */
	if (s3cdma->sdata->has_clocks) {
		ret = clk_enable(phy->clk);
		if (ret) {
			dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
				phy->id, ret);
			phy->serving = NULL;
			return NULL;
		}
	}

	return phy;
}

/*
 * Mark the physical channel as free.
 *
 * This drops the link between the physical and virtual channel.
 */
static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
{
	struct s3c24xx_dma_engine *s3cdma = phy->host;

	if (s3cdma->sdata->has_clocks)
		clk_disable(phy->clk);

	phy->serving = NULL;
}

/*
 * Stops the channel by writing the stop bit.
 * This should not be used for an on-going transfer, but as a method of
 * shutting down a channel (e.g. when it's no longer used) or terminating a
 * transfer.
 */
static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
{
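	/*
	 * Note: this is a plain write, not a read-modify-write, so it
	 * also clears the ON and SWTRIG bits as a side effect.
	 */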
	writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
}

/*
 * Virtual channel handling
 */

static inline
struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
}

static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct s3c24xx_txd *txd = s3cchan->at;
	u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;

	return tc * txd->width;
}

static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	unsigned long flags;
	int ret = 0;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);

	if (!s3cchan->slave) {
		ret = -EINVAL;
		goto out;
	}

	s3cchan->cfg = *config;

out:
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
	return ret;
}
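
/*
 * A slave client typically reaches this callback through the generic
 * dmaengine API, e.g. (sketch with hypothetical peripheral values):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_bus_addr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */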

/*
 * Transfer handling
 */

static inline
struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct s3c24xx_txd, vd.tx);
}

static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
{
	struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		INIT_LIST_HEAD(&txd->dsg_list);
		txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
	}

	return txd;
}

static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
{
	struct s3c24xx_sg *dsg, *_dsg;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
				       struct s3c24xx_txd *txd)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
	struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
	u32 dcon = txd->dcon;
	u32 val;

	/* transfer-size and -count from len and width */
	switch (txd->width) {
	case 1:
		dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
		break;
	case 2:
		dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
		break;
	case 4:
		dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
		break;
	}

	if (s3cchan->slave) {
		struct s3c24xx_dma_channel *cdata =
					&pdata->channels[s3cchan->id];

		if (s3cdma->sdata->has_reqsel) {
			writel_relaxed((cdata->chansel << 1) |
							S3C24XX_DMAREQSEL_HW,
					phy->base + S3C24XX_DMAREQSEL);
		} else {
			int csel = cdata->chansel >> (phy->id *
							S3C24XX_CHANSEL_WIDTH);

			csel &= S3C24XX_CHANSEL_REQ_MASK;
			dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
			dcon |= S3C24XX_DCON_HWTRIG;
		}
	} else {
		if (s3cdma->sdata->has_reqsel)
			writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
	}

	writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
	writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
	writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
	writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
	writel_relaxed(dcon, phy->base + S3C24XX_DCON);

	val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
	val &= ~S3C24XX_DMASKTRIG_STOP;
	val |= S3C24XX_DMASKTRIG_ON;

	/* trigger the dma operation for memcpy transfers */
	if (!s3cchan->slave)
		val |= S3C24XX_DMASKTRIG_SWTRIG;

	writel(val, phy->base + S3C24XX_DMASKTRIG);
}
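
/*
 * The register setup above uses writel_relaxed(); the final DMASKTRIG
 * write uses writel(), whose barrier orders the earlier relaxed
 * programming writes before the channel is actually enabled.
 */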

/*
 * Set the initial DMA register values and start first sg.
 */
static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_phy *phy = s3cchan->phy;
	struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);

	list_del(&txd->vd.node);

	s3cchan->at = txd;

	/* Wait for channel inactive */
	while (s3c24xx_dma_phy_busy(phy))
		cpu_relax();

	/* point to the first element of the sg list */
	txd->at = txd->dsg_list.next;
	s3c24xx_dma_start_next_sg(s3cchan, txd);
}

/*
 * Try to allocate a physical channel.  When successful, assign it to
 * this virtual channel, and initiate the next descriptor.  The
 * virtual channel lock must be held at this point.
 */
static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_phy *phy;

	phy = s3c24xx_dma_get_phy(s3cchan);
	if (!phy) {
		dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
			s3cchan->name);
		s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
		return;
	}

	dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;

	s3c24xx_dma_start_next_txd(s3cchan);
}

static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
	struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;

	dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
		phy->id, s3cchan->name);

	/*
	 * We do this without taking the lock; we're really only concerned
	 * about whether this pointer is NULL or not, and we're guaranteed
	 * that this will only be called when it _already_ is non-NULL.
	 */
	phy->serving = s3cchan;
	s3cchan->phy = phy;
	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
	s3c24xx_dma_start_next_txd(s3cchan);
}

/*
 * Free a physical DMA channel, potentially reallocating it to another
 * virtual channel if we have any pending.
 */
static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
{
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	struct s3c24xx_dma_chan *p, *next;

retry:
	next = NULL;

	/* Find a waiting virtual channel for the next transfer. */
	list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
		if (p->state == S3C24XX_DMA_CHAN_WAITING) {
			next = p;
			break;
		}

	if (!next) {
		list_for_each_entry(p, &s3cdma->slave.channels,
				    vc.chan.device_node)
			if (p->state == S3C24XX_DMA_CHAN_WAITING &&
				      s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
				next = p;
				break;
			}
	}

	/* Ensure that the physical channel is stopped */
	s3c24xx_dma_terminate_phy(s3cchan->phy);

	if (next) {
		bool success;

		/*
		 * Eww.  We know this isn't going to deadlock
		 * but lockdep probably doesn't.
		 */
		spin_lock(&next->vc.lock);
		/* Re-check the state now that we have the lock */
		success = next->state == S3C24XX_DMA_CHAN_WAITING;
		if (success)
			s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
		spin_unlock(&next->vc.lock);

		/* If the state changed, try to find another channel */
		if (!success)
			goto retry;
	} else {
		/* No more jobs, so free up the physical channel */
		s3c24xx_dma_put_phy(s3cchan->phy);
	}

	s3cchan->phy = NULL;
	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}

static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);

	if (!s3cchan->slave)
		dma_descriptor_unmap(&vd->tx);

	s3c24xx_dma_free_txd(txd);
}

static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
{
	struct s3c24xx_dma_phy *phy = data;
	struct s3c24xx_dma_chan *s3cchan = phy->serving;
	struct s3c24xx_txd *txd;

	dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);

	/*
	 * Interrupts happen to notify the completion of a transfer and the
	 * channel should have moved into its stop state already on its own.
	 * Therefore interrupts on channels not bound to a virtual channel
	 * should never happen. Nevertheless send a terminate command to the
	 * channel if the unlikely case happens.
	 */
	if (unlikely(!s3cchan)) {
		dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
			phy->id);

		s3c24xx_dma_terminate_phy(phy);

		return IRQ_HANDLED;
	}

	spin_lock(&s3cchan->vc.lock);
	txd = s3cchan->at;
	if (txd) {
		/* when more sgs are in this txd, start the next one */
		if (!list_is_last(txd->at, &txd->dsg_list)) {
			txd->at = txd->at->next;
			if (txd->cyclic)
				vchan_cyclic_callback(&txd->vd);
			s3c24xx_dma_start_next_sg(s3cchan, txd);
		} else if (!txd->cyclic) {
			s3cchan->at = NULL;
			vchan_cookie_complete(&txd->vd);

			/*
			 * And start the next descriptor (if any),
			 * otherwise free this channel.
			 */
			if (vchan_next_desc(&s3cchan->vc))
				s3c24xx_dma_start_next_txd(s3cchan);
			else
				s3c24xx_dma_phy_free(s3cchan);
		} else {
			vchan_cyclic_callback(&txd->vd);

			/* Cyclic: reset at beginning */
			txd->at = txd->dsg_list.next;
			s3c24xx_dma_start_next_sg(s3cchan, txd);
		}
	}
	spin_unlock(&s3cchan->vc.lock);

	return IRQ_HANDLED;
}

/*
 * The DMA ENGINE API
 */

static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
	LIST_HEAD(head);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&s3cchan->vc.lock, flags);

	if (!s3cchan->phy && !s3cchan->at) {
		dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
			s3cchan->id);
		ret = -EINVAL;
		goto unlock;
	}

	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;

	/* Mark physical channel as free */
	if (s3cchan->phy)
		s3c24xx_dma_phy_free(s3cchan);

	/* Dequeue current job */
	if (s3cchan->at) {
		vchan_terminate_vdesc(&s3cchan->at->vd);
		s3cchan->at = NULL;
	}

	/* Dequeue jobs not yet fired as well */
	vchan_get_all_descriptors(&s3cchan->vc, &head);

	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	vchan_dma_desc_free_list(&s3cchan->vc, &head);

	return 0;

unlock:
	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);

	return ret;
}

static void s3c24xx_dma_synchronize(struct dma_chan *chan)
{
	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);

	vchan_synchronize(&s3cchan->vc);
}

static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
{
	/* Ensure all queued descriptors are freed */
	vchan_free_chan_resources(to_virt_chan(chan));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 		dma_cookie_t cookie, struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	struct s3c24xx_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct s3c24xx_sg *dsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	struct virt_dma_desc *vd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	enum dma_status ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	size_t bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	spin_lock_irqsave(&s3cchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	 * There's no point calculating the residue if there's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	 * no txstate to store the value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	if (ret == DMA_COMPLETE || !txstate) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	vd = vchan_find_desc(&s3cchan->vc, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	if (vd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		/* On the issued list, so hasn't been processed yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		txd = to_s3c24xx_txd(&vd->tx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 		list_for_each_entry(dsg, &txd->dsg_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 			bytes += dsg->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		 * Currently running, so sum over the pending sg's and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		 * the currently active one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		txd = s3cchan->at;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 		dsg = list_entry(txd->at, struct s3c24xx_sg, node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 		list_for_each_entry_from(dsg, &txd->dsg_list, node)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 			bytes += dsg->len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		bytes += s3c24xx_dma_getbytes_chan(s3cchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	 * This cookie is not complete yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	 * Report the number of bytes left in the active transaction and the queue.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	dma_set_residue(txstate, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	/* Whether waiting or running, we're in progress */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) }
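/*
 * Illustrative sketch (not part of the driver): polling the residue that
 * s3c24xx_dma_tx_status() computes above. The cookie is assumed to come
 * from an earlier dmaengine_submit() on the same channel.
 */
#if 0
static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE)
		return 0;

	/* Sum of the untransferred sg lengths plus the hardware counter */
	return state.residue;
}
#endif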
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813)  * Initialize a descriptor to be used by memcpy submit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		size_t len, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	struct s3c24xx_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	struct s3c24xx_sg *dsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	int src_mod, dest_mod;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			len, s3cchan->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	if ((len & S3C24XX_DCON_TC_MASK) != len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 		dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	txd = s3c24xx_dma_get_txd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	if (!txd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (!dsg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		s3c24xx_dma_free_txd(txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	list_add_tail(&dsg->node, &txd->dsg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	dsg->src_addr = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	dsg->dst_addr = dest;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	dsg->len = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	 * Determine a suitable transfer width.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	 * The DMA controller cannot fetch/store information which is not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	 * naturally aligned on the bus, i.e., a 4 byte fetch must start at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	 * an address divisible by 4 - more generally addr % width must be 0.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	src_mod = src % 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 	dest_mod = dest % 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	switch (len % 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 	case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		txd->width = ((src_mod == 2 || src_mod == 0) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			      (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		txd->width = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	}
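	/*
	 * Worked example of the width selection above: for src = 0x1000,
	 * dest = 0x2002 and len = 6, len % 4 == 2 with src_mod == 0 and
	 * dest_mod == 2, so halfword (width 2) transfers are used; had
	 * dest been 0x2001 instead, only byte-wide (width 1) transfers
	 * would be safe.
	 */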
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 	txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		     S3C24XX_DCON_SERV_WHOLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) }
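/*
 * Illustrative sketch (not part of the driver): a client driving the memcpy
 * path above through the generic dmaengine API. The channel and the two bus
 * addresses are assumptions (obtained elsewhere via dma_request_chan() and
 * dma_map_single() or similar).
 */
#if 0
static int example_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Nothing moves until the pending queue is kicked */
	dma_async_issue_pending(chan);
	return 0;
}
#endif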
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	enum dma_transfer_direction direction, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	struct s3c24xx_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	struct s3c24xx_sg *dsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 	unsigned sg_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	dma_addr_t slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	u32 hwcfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	dev_dbg(&s3cdma->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 		"prepare cyclic transaction of %zu bytes with period %zu from %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 		size, period, s3cchan->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (!is_slave_direction(direction)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		dev_err(&s3cdma->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			"direction %d unsupported\n", direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	txd = s3c24xx_dma_get_txd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	if (!txd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	txd->cyclic = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	if (cdata->handshake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		txd->dcon |= S3C24XX_DCON_HANDSHAKE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	switch (cdata->bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	case S3C24XX_DMA_APB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 		hwcfg |= S3C24XX_DISRCC_LOC_APB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	case S3C24XX_DMA_AHB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	 * Always assume our peripheral destination is a fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * address in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	hwcfg |= S3C24XX_DISRCC_INC_FIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 	 * Individual dma operations are requested by the slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 			      S3C24XX_DISRCC_INC_INCREMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		txd->didstc = hwcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		slave_addr = s3cchan->cfg.dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 		txd->width = s3cchan->cfg.dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		txd->disrcc = hwcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 			      S3C24XX_DIDSTC_INC_INCREMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		slave_addr = s3cchan->cfg.src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		txd->width = s3cchan->cfg.src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 	sg_len = size / period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	for (i = 0; i < sg_len; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 		if (!dsg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 			s3c24xx_dma_free_txd(txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 		list_add_tail(&dsg->node, &txd->dsg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		dsg->len = period;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		/* Check last period length */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		if (i == sg_len - 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			dsg->len = size - period * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 			dsg->src_addr = addr + period * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			dsg->dst_addr = slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 		} else { /* DMA_DEV_TO_MEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 			dsg->src_addr = slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			dsg->dst_addr = addr + period * i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) }
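/*
 * Illustrative sketch (not part of the driver): starting the cyclic
 * transfer prepared above, in the way an audio driver would for a ring
 * buffer split into equal periods. buf, buf_len, period_len and the
 * callback are assumptions supplied by the client; the callback fires once
 * per completed period until the channel is terminated.
 */
#if 0
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback period_done, void *arg)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = period_done;
	desc->callback_param = arg;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
#endif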
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		struct dma_chan *chan, struct scatterlist *sgl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		unsigned int sg_len, enum dma_transfer_direction direction,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		unsigned long flags, void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	struct s3c24xx_txd *txd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 	struct s3c24xx_sg *dsg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	dma_addr_t slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	u32 hwcfg = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 	int tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %u bytes from %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 			sg_dma_len(sgl), s3cchan->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	txd = s3c24xx_dma_get_txd();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	if (!txd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	if (cdata->handshake)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		txd->dcon |= S3C24XX_DCON_HANDSHAKE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 	switch (cdata->bus) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	case S3C24XX_DMA_APB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		hwcfg |= S3C24XX_DISRCC_LOC_APB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	case S3C24XX_DMA_AHB:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	 * Always assume our peripheral destination is a fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	 * address in memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	hwcfg |= S3C24XX_DISRCC_INC_FIXED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	 * Individual dma operations are requested by the slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 			      S3C24XX_DISRCC_INC_INCREMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		txd->didstc = hwcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		slave_addr = s3cchan->cfg.dst_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		txd->width = s3cchan->cfg.dst_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	} else if (direction == DMA_DEV_TO_MEM) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		txd->disrcc = hwcfg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			      S3C24XX_DIDSTC_INC_INCREMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		slave_addr = s3cchan->cfg.src_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		txd->width = s3cchan->cfg.src_addr_width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		s3c24xx_dma_free_txd(txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		dev_err(&s3cdma->pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			"direction %d unsupported\n", direction);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	for_each_sg(sgl, sg, sg_len, tmp) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 		if (!dsg) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			s3c24xx_dma_free_txd(txd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 		list_add_tail(&dsg->node, &txd->dsg_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		dsg->len = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		if (direction == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			dsg->src_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			dsg->dst_addr = slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 		} else { /* DMA_DEV_TO_MEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			dsg->src_addr = slave_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			dsg->dst_addr = sg_dma_address(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) }
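/*
 * Illustrative sketch (not part of the driver): a peripheral driver feeding
 * the slave_sg path above. The FIFO address and the mapped scatterlist are
 * assumptions; dmaengine_slave_config() lands in
 * s3c24xx_dma_set_runtime_config() and fills the s3cchan->cfg fields
 * consumed by the prep callback above.
 */
#if 0
static int example_tx_to_fifo(struct dma_chan *chan, dma_addr_t fifo_addr,
			      struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}
#endif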
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064)  * Slave transactions call back to the slave device to allow
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065)  * synchronization of slave DMA signals with the DMAC enable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 	spin_lock_irqsave(&s3cchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	if (vchan_issue_pending(&s3cchan->vc)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 			s3c24xx_dma_phy_alloc_and_start(s3cchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)  * Bringup and teardown
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)  * Initialise the DMAC memcpy/slave channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086)  * Make a local wrapper to hold required data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		struct dma_device *dmadev, unsigned int channels, bool slave)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	struct s3c24xx_dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	INIT_LIST_HEAD(&dmadev->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 	 * Register as many memcpy channels as we have physical channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 	 * we won't always be able to use them all, but the code will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	 * have to cope with that situation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 	for (i = 0; i < channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 		if (!chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 			return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		chan->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 		chan->host = s3cdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 		chan->state = S3C24XX_DMA_CHAN_IDLE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 		if (slave) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 			chan->slave = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 			chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 			if (!chan->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			if (!chan->name)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 				return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 		dev_dbg(dmadev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 			 "initialize virtual channel \"%s\"\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 			 chan->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		chan->vc.desc_free = s3c24xx_dma_desc_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		vchan_init(&chan->vc, dmadev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 		 i, slave ? "slave" : "memcpy");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	return i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	struct s3c24xx_dma_chan *chan = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 	struct s3c24xx_dma_chan *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	list_for_each_entry_safe(chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				 next, &dmadev->channels, vc.chan.device_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 		list_del(&chan->vc.chan.device_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 		tasklet_kill(&chan->vc.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) static struct soc_data soc_s3c2410 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	.stride = 0x40,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	.has_reqsel = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 	.has_clocks = false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) /* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static struct soc_data soc_s3c2412 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 	.stride = 0x40,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	.has_reqsel = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	.has_clocks = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) /* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static struct soc_data soc_s3c2443 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	.stride = 0x100,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	.has_reqsel = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	.has_clocks = true,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) static const struct platform_device_id s3c24xx_dma_driver_ids[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		.name		= "s3c2410-dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		.driver_data	= (kernel_ulong_t)&soc_s3c2410,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	}, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 		.name		= "s3c2412-dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		.driver_data	= (kernel_ulong_t)&soc_s3c2412,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 	}, {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 		.name		= "s3c2443-dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 		.driver_data	= (kernel_ulong_t)&soc_s3c2443,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	{ },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	return (struct soc_data *)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 			 platform_get_device_id(pdev)->driver_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) static int s3c24xx_dma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	struct s3c24xx_dma_engine *s3cdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	struct soc_data *sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	struct resource *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	if (!pdata) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 		dev_err(&pdev->dev, "platform data missing\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	/* Basic sanity check */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 			pdata->num_phy_channels, MAX_DMA_CHANNELS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	sdata = s3c24xx_dma_get_soc_data(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	if (!sdata)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (!s3cdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	s3cdma->pdev = pdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 	s3cdma->pdata = pdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	s3cdma->sdata = sdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (IS_ERR(s3cdma->base))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		return PTR_ERR(s3cdma->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 	s3cdma->phy_chans = devm_kcalloc(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 					      pdata->num_phy_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 					      sizeof(struct s3c24xx_dma_phy),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 					      GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 	if (!s3cdma->phy_chans)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	/* acquire irqs and clocks for all physical channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	for (i = 0; i < pdata->num_phy_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		char clk_name[6];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		phy->id = i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		phy->base = s3cdma->base + (i * sdata->stride);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 		phy->host = s3cdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		phy->irq = platform_get_irq(pdev, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 		if (phy->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 				       0, pdev->name, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 		if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 			dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 				i, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 		if (sdata->has_clocks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 			sprintf(clk_name, "dma.%d", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 			phy->clk = devm_clk_get(&pdev->dev, clk_name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 			if (IS_ERR(phy->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 				dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %ld\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 					i, PTR_ERR(phy->clk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 			ret = clk_prepare(phy->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 			if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 				dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 					i, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		spin_lock_init(&phy->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		phy->valid = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		dev_dbg(&pdev->dev, "physical channel %d is %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 			i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	/* Initialize memcpy engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	s3cdma->memcpy.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	s3cdma->memcpy.device_free_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 					s3c24xx_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 	s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	/* Initialize slave engine for SoC internal dedicated peripherals */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	s3cdma->slave.dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 	s3cdma->slave.device_free_chan_resources =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 					s3c24xx_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 	s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 	s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 	s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 	s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	s3cdma->slave.filter.map = pdata->slave_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	s3cdma->slave.filter.mapcnt = pdata->slavecnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	s3cdma->slave.filter.fn = s3c24xx_dma_filter;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	/* Register as many memcpy channels as there are physical channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 						pdata->num_phy_channels, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 	if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 			 "%s failed to enumerate memcpy channels - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			 __func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		goto err_memcpy;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	/* Register slave channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 				pdata->num_channels, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	if (ret <= 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 			"%s failed to enumerate slave channels - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 				__func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		goto err_slave;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	ret = dma_async_device_register(&s3cdma->memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 		dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 			"%s failed to register memcpy as an async device - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 			__func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		goto err_memcpy_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	ret = dma_async_device_register(&s3cdma->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 		dev_warn(&pdev->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 			"%s failed to register slave as an async device - %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 			__func__, ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 		goto err_slave_reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	platform_set_drvdata(pdev, s3cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 		 pdata->num_phy_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) err_slave_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 	dma_async_device_unregister(&s3cdma->memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) err_memcpy_reg:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) err_slave:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) err_memcpy:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	if (sdata->has_clocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 		for (i = 0; i < pdata->num_phy_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 			if (phy->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 				clk_unprepare(phy->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
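/*
 * Illustrative sketch (not part of the driver): the shape of the platform
 * data the probe above consumes, as SoC/board code would register it via a
 * "s3c2443-dma" platform device matching the id table above. The struct
 * layout follows <linux/platform_data/dma-s3c24xx.h>; DMACH_SDI, the
 * channel-request value and the "s3c-sdi" device name are placeholders for
 * whatever the machine code really wires up.
 */
#if 0
static struct s3c24xx_dma_channel example_channels[] = {
	[DMACH_SDI] = { .bus = S3C24XX_DMA_APB,
			.handshake = false,
			.chansel = S3C24XX_DMA_CHANREQ(2, 2), },
};

static const struct dma_slave_map example_slave_map[] = {
	{ "s3c-sdi", "rx-tx", (void *)DMACH_SDI },
};

static struct s3c24xx_dma_platdata example_dma_platdata = {
	.num_phy_channels = 6,
	.channels = example_channels,
	.num_channels = ARRAY_SIZE(example_channels),
	.slave_map = example_slave_map,
	.slavecnt = ARRAY_SIZE(example_slave_map),
};
#endif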
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static void s3c24xx_dma_free_irq(struct platform_device *pdev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 				struct s3c24xx_dma_engine *s3cdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		devm_free_irq(&pdev->dev, phy->irq, phy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) static int s3c24xx_dma_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	dma_async_device_unregister(&s3cdma->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 	dma_async_device_unregister(&s3cdma->memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	s3c24xx_dma_free_irq(pdev, s3cdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	if (sdata->has_clocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		for (i = 0; i < pdata->num_phy_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 			if (phy->valid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 				clk_unprepare(phy->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) static struct platform_driver s3c24xx_dma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	.driver		= {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 		.name	= "s3c24xx-dma",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	.id_table	= s3c24xx_dma_driver_ids,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 	.probe		= s3c24xx_dma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 	.remove		= s3c24xx_dma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) module_platform_driver(s3c24xx_dma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	struct s3c24xx_dma_chan *s3cchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	s3cchan = to_s3c24xx_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	return s3cchan->id == (uintptr_t)param;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) EXPORT_SYMBOL(s3c24xx_dma_filter);
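/*
 * Illustrative sketch (not part of the driver): requesting a channel with
 * the filter exported above, for clients that do not go through the
 * dma_slave_map table. The id passed as param is a placeholder for a
 * DMACH_* value from the machine code, compared against s3cchan->id.
 */
#if 0
static struct dma_chan *example_request(unsigned int dmach_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, s3c24xx_dma_filter,
				   (void *)(uintptr_t)dmach_id);
}
#endif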
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) MODULE_DESCRIPTION("S3C24XX DMA Driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) MODULE_AUTHOR("Heiko Stuebner");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) MODULE_LICENSE("GPL v2");