Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/clk.h>
#include <linux/of_dma.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

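/*
 * Descriptor lifecycle, as a sketch of the standard dmaengine flow this
 * driver follows: sirfsoc_dma_prep_interleaved()/sirfsoc_dma_prep_cyclic()
 * take a descriptor from the per-channel free list (free -> prepared),
 * sirfsoc_dma_tx_submit() queues it (prepared -> queued), and
 * sirfsoc_dma_issue_pending() starts it (queued -> active). The interrupt
 * handler then moves finished descriptors to the completed list, and the
 * tasklet (sirfsoc_dma_process_completed()) runs client callbacks and
 * recycles descriptors back to the free list.
 */
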
#define SIRFSOC_DMA_VER_A7V1                    1
#define SIRFSOC_DMA_VER_A7V2                    2
#define SIRFSOC_DMA_VER_A6                      4

#define SIRFSOC_DMA_DESCRIPTORS                 16
#define SIRFSOC_DMA_CHANNELS                    16
#define SIRFSOC_DMA_TABLE_NUM                   256

#define SIRFSOC_DMA_CH_ADDR                     0x00
#define SIRFSOC_DMA_CH_XLEN                     0x04
#define SIRFSOC_DMA_CH_YLEN                     0x08
#define SIRFSOC_DMA_CH_CTRL                     0x0C

#define SIRFSOC_DMA_WIDTH_0                     0x100
#define SIRFSOC_DMA_CH_VALID                    0x140
#define SIRFSOC_DMA_CH_INT                      0x144
#define SIRFSOC_DMA_INT_EN                      0x148
#define SIRFSOC_DMA_INT_EN_CLR                  0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR            0x154
#define SIRFSOC_DMA_WIDTH_ATLAS7                0x10
#define SIRFSOC_DMA_VALID_ATLAS7                0x14
#define SIRFSOC_DMA_INT_ATLAS7                  0x18
#define SIRFSOC_DMA_INT_EN_ATLAS7               0x1c
#define SIRFSOC_DMA_LOOP_CTRL_ATLAS7            0x20
#define SIRFSOC_DMA_CUR_DATA_ADDR               0x34
#define SIRFSOC_DMA_MUL_ATLAS7                  0x38
#define SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7         0x158
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7     0x15C
#define SIRFSOC_DMA_IOBG_SCMD_EN		0x800
#define SIRFSOC_DMA_EARLY_RESP_SET		0x818
#define SIRFSOC_DMA_EARLY_RESP_CLR		0x81C

#define SIRFSOC_DMA_MODE_CTRL_BIT               4
#define SIRFSOC_DMA_DIR_CTRL_BIT                5
#define SIRFSOC_DMA_MODE_CTRL_BIT_ATLAS7        2
#define SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7       3
#define SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7         4
#define SIRFSOC_DMA_TAB_NUM_ATLAS7              7
#define SIRFSOC_DMA_CHAIN_INT_BIT_ATLAS7        5
#define SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7     25
#define SIRFSOC_DMA_CHAIN_ADDR_SHIFT            32

#define SIRFSOC_DMA_INT_FINI_INT_ATLAS7         BIT(0)
#define SIRFSOC_DMA_INT_CNT_INT_ATLAS7          BIT(1)
#define SIRFSOC_DMA_INT_PAU_INT_ATLAS7          BIT(2)
#define SIRFSOC_DMA_INT_LOOP_INT_ATLAS7         BIT(3)
#define SIRFSOC_DMA_INT_INV_INT_ATLAS7          BIT(4)
#define SIRFSOC_DMA_INT_END_INT_ATLAS7          BIT(5)
#define SIRFSOC_DMA_INT_ALL_ATLAS7              0x3F

/* The xlen and dma_width registers are expressed in 4-byte words */
#define SIRFSOC_DMA_WORD_LEN			4
#define SIRFSOC_DMA_XLEN_MAX_V1         0x800
#define SIRFSOC_DMA_XLEN_MAX_V2         0x1000

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int             xlen;           /* DMA xlen */
	int             ylen;           /* DMA ylen */
	int             width;          /* DMA width */
	int             dir;
	bool            cyclic;         /* is loop DMA? */
	bool            chain;          /* is chain DMA? */
	u32             addr;		/* DMA buffer address */
	u64 chain_table[SIRFSOC_DMA_TABLE_NUM]; /* chain tbl */
};
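
/*
 * Note on chain_table: this is an inference from the constants above
 * rather than anything documented here, but each 64-bit entry appears
 * to pack a sirfsoc_dma_chain_flag at
 * SIRFSOC_DMA_CHAIN_FLAG_SHIFT_ATLAS7 (bit 25) and the address of the
 * next buffer in the upper 32 bits (SIRFSOC_DMA_CHAIN_ADDR_SHIFT).
 */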

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma_regs {
	u32				ctrl[SIRFSOC_DMA_CHANNELS];
	u32				interrupt_en;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	struct clk			*clk;
	int				type;
	void (*exec_desc)(struct sirfsoc_dma_desc *sdesc,
		int cid, int burst_mode, void __iomem *base);
	struct sirfsoc_dma_regs		regs_save;
};

struct sirfsoc_dmadata {
	void (*exec)(struct sirfsoc_dma_desc *sdesc,
		int cid, int burst_mode, void __iomem *base);
	int type;
};

enum sirfsoc_dma_chain_flag {
	SIRFSOC_DMA_CHAIN_NORMAL = 0x01,
	SIRFSOC_DMA_CHAIN_PAUSE = 0x02,
	SIRFSOC_DMA_CHAIN_LOOP = 0x03,
	SIRFSOC_DMA_CHAIN_END = 0x04
};

#define DRV_NAME	"sirfsoc_dma"

static int sirfsoc_dma_runtime_suspend(struct device *dev);

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

static void sirfsoc_dma_execute_hw_a7v2(struct sirfsoc_dma_desc *sdesc,
		int cid, int burst_mode, void __iomem *base)
{
	if (sdesc->chain) {
		/* DMA v2 HW chain mode */
		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
			       (sdesc->chain <<
				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
			       (0x8 << SIRFSOC_DMA_TAB_NUM_ATLAS7) | 0x3,
			       base + SIRFSOC_DMA_CH_CTRL);
	} else {
		/* DMA v2 legacy mode */
		writel_relaxed(sdesc->xlen, base + SIRFSOC_DMA_CH_XLEN);
		writel_relaxed(sdesc->ylen, base + SIRFSOC_DMA_CH_YLEN);
		writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_ATLAS7);
		writel_relaxed((sdesc->width*((sdesc->ylen+1)>>1)),
				base + SIRFSOC_DMA_MUL_ATLAS7);
		writel_relaxed((sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT_ATLAS7) |
			       (sdesc->chain <<
				SIRFSOC_DMA_CHAIN_CTRL_BIT_ATLAS7) |
			       0x3, base + SIRFSOC_DMA_CH_CTRL);
	}
	writel_relaxed(sdesc->chain ? SIRFSOC_DMA_INT_END_INT_ATLAS7 :
		       (SIRFSOC_DMA_INT_FINI_INT_ATLAS7 |
			SIRFSOC_DMA_INT_LOOP_INT_ATLAS7),
		       base + SIRFSOC_DMA_INT_EN_ATLAS7);
	writel(sdesc->addr, base + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic)
		writel(0x10001, base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
}

static void sirfsoc_dma_execute_hw_a7v1(struct sirfsoc_dma_desc *sdesc,
		int cid, int burst_mode, void __iomem *base)
{
	writel_relaxed(1, base + SIRFSOC_DMA_IOBG_SCMD_EN);
	writel_relaxed((1 << cid), base + SIRFSOC_DMA_EARLY_RESP_SET);
	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7),
		       base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
	}
}

static void sirfsoc_dma_execute_hw_a6(struct sirfsoc_dma_desc *sdesc,
		int cid, int burst_mode, void __iomem *base)
{
	writel_relaxed(sdesc->width, base + SIRFSOC_DMA_WIDTH_0 + cid * 4);
	writel_relaxed(cid | (burst_mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		       (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		       base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, base + cid * 0x10 + SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, base + cid * 0x10 + SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(base + SIRFSOC_DMA_INT_EN) |
		       (1 << cid), base + SIRFSOC_DMA_INT_EN);
	writel(sdesc->addr >> 2, base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
		       readl_relaxed(base + SIRFSOC_DMA_CH_LOOP_CTRL),
		       base + SIRFSOC_DMA_CH_LOOP_CTRL);
	}
}

/* Execute the first queued DMA descriptor */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;
	void __iomem *base;

	/*
	 * The channel lock is already held by our callers, so we don't
	 * take it again here.
	 */
	base = sdma->base;
	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
				 node);
	/* Move the first queued descriptor to the active list */
	list_move_tail(&sdesc->node, &schan->active);

	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
		cid = 0;

	/* Start the DMA transfer */
	sdma->exec_desc(sdesc, cid, schan->mode, base);

	if (sdesc->cyclic)
		schan->happened_cyclic = schan->completed_cyclic = 0;
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	bool chain;
	int ch;
	void __iomem *reg;

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A6:
	case SIRFSOC_DMA_VER_A7V1:
		is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
		reg = sdma->base + SIRFSOC_DMA_CH_INT;
		while ((ch = fls(is) - 1) >= 0) {
			is &= ~(1 << ch);
			writel_relaxed(1 << ch, reg);
			schan = &sdma->channels[ch];
			spin_lock(&schan->lock);
			sdesc = list_first_entry(&schan->active,
						 struct sirfsoc_dma_desc, node);
			if (!sdesc->cyclic) {
				/* Execute queued descriptors */
				list_splice_tail_init(&schan->active,
						      &schan->completed);
				dma_cookie_complete(&sdesc->desc);
				if (!list_empty(&schan->queued))
					sirfsoc_dma_execute(schan);
			} else
				schan->happened_cyclic++;
			spin_unlock(&schan->lock);
		}
		break;

	case SIRFSOC_DMA_VER_A7V2:
		is = readl(sdma->base + SIRFSOC_DMA_INT_ATLAS7);

		reg = sdma->base + SIRFSOC_DMA_INT_ATLAS7;
		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7, reg);
		schan = &sdma->channels[0];
		spin_lock(&schan->lock);
		sdesc = list_first_entry(&schan->active,
					 struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			chain = sdesc->chain;
			if ((chain && (is & SIRFSOC_DMA_INT_END_INT_ATLAS7)) ||
				(!chain &&
				(is & SIRFSOC_DMA_INT_FINI_INT_ATLAS7))) {
				/* Execute queued descriptors */
				list_splice_tail_init(&schan->active,
						      &schan->completed);
				dma_cookie_complete(&sdesc->desc);
				if (!list_empty(&schan->queued))
					sirfsoc_dma_execute(schan);
			}
		} else if (sdesc->cyclic && (is &
					SIRFSOC_DMA_INT_LOOP_INT_ATLAS7))
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
		break;

	default:
		break;
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				dmaengine_desc_get_callback_invoke(desc, NULL);
				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			if (list_empty(&schan->active)) {
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* for cyclic channel, desc is always in active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				dmaengine_desc_get_callback_invoke(desc, NULL);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(struct tasklet_struct *t)
{
	struct sirfsoc_dma *sdma = from_tasklet(sdma, t, tasklet);

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct dma_chan *chan,
				    struct dma_slave_config *config)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
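
/*
 * Hypothetical client usage (not part of this driver): the only bus
 * width this controller accepts is 4 bytes, and src_maxburst == 4 is
 * what selects burst mode in sirfsoc_dma_slave_config() above:
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 */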

static int sirfsoc_dma_terminate_all(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_INT);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base +
			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_INT_EN_ATLAS7);
		writel_relaxed(SIRFSOC_DMA_INT_ALL_ATLAS7,
			       sdma->base + SIRFSOC_DMA_INT_ATLAS7);
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_VALID_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			       ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) &
			       ~((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
		break;
	default:
		break;
	}

	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base +
			       SIRFSOC_DMA_CH_LOOP_CTRL_CLR_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0, sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) &
			       ~((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		break;

	default:
		break;
	}

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);
	switch (sdma->type) {
	case SIRFSOC_DMA_VER_A7V1:
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A7V2:
		writel_relaxed(0x10001,
			       sdma->base + SIRFSOC_DMA_LOOP_CTRL_ATLAS7);
		break;
	case SIRFSOC_DMA_VER_A6:
		writel_relaxed(readl_relaxed(sdma->base +
					     SIRFSOC_DMA_CH_LOOP_CTRL) |
			       ((1 << cid) | 1 << (cid + 16)),
			       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		break;

	default:
		break;
	}

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	pm_runtime_get_sync(sdma->dma.dev);

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);

	pm_runtime_put(sdma->dma.dev);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	struct sirfsoc_dma_desc *sdesc;
	int cid = schan->chan.chan_id;
	unsigned long dma_pos;
	unsigned long dma_request_bytes;
	unsigned long residue;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active)) {
		ret = dma_cookie_status(chan, cookie, txstate);
		dma_set_residue(txstate, 0);
		spin_unlock_irqrestore(&schan->lock, flags);
		return ret;
	}
	sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc, node);
	if (sdesc->cyclic)
		dma_request_bytes = (sdesc->xlen + 1) * (sdesc->ylen + 1) *
			(sdesc->width * SIRFSOC_DMA_WORD_LEN);
	else
		dma_request_bytes = sdesc->xlen * SIRFSOC_DMA_WORD_LEN;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (sdma->type == SIRFSOC_DMA_VER_A7V2)
		cid = 0;

	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
		dma_pos = readl_relaxed(sdma->base + SIRFSOC_DMA_CUR_DATA_ADDR);
	} else {
		dma_pos = readl_relaxed(
			sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR) << 2;
	}

	residue = dma_request_bytes - (dma_pos - sdesc->addr);
	dma_set_residue(txstate, residue);

	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}
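
/*
 * Worked example of the residue math in sirfsoc_dma_tx_status() above,
 * with illustrative numbers: for a non-cyclic transfer with
 * xlen = 0x100, dma_request_bytes is 0x100 * 4 = 1024 bytes; if the
 * hardware has advanced dma_pos to sdesc->addr + 256, the reported
 * residue is 1024 - 256 = 768 bytes still to transfer.
 */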

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2, and
	 * ylen (the number of frames minus 1) must be at least 0.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	if ((xt->frame_size == 1) && (xt->numf > 0)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 		sdesc->cyclic = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 				SIRFSOC_DMA_WORD_LEN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		sdesc->ylen = xt->numf - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		if (xt->dir == DMA_MEM_TO_DEV) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 			sdesc->addr = xt->src_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 			sdesc->dir = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			sdesc->addr = xt->dst_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			sdesc->dir = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 		list_add_tail(&sdesc->node, &schan->prepared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 		pr_err("sirfsoc DMA: invalid xfer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		ret = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		goto err_xfer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	spin_unlock_irqrestore(&schan->lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	return &sdesc->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) err_xfer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	spin_unlock_irqrestore(&schan->lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) no_desc:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) err_dir:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 	return ERR_PTR(ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) static struct dma_async_tx_descriptor *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 	size_t buf_len, size_t period_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	enum dma_transfer_direction direction, unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct sirfsoc_dma_desc *sdesc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	unsigned long iflags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	 * We only support cyclic transfers with two periods.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	 * If the X-length is set to 0, the controller runs in loop mode:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	 * the DMA address keeps increasing until it reaches the end of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 	 * loop area whose size is (DMA_WIDTH x (Y_LENGTH + 1)), then it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 	 * wraps back to the beginning of that area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	 * In loop mode the data region is divided into two halves, BUFA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	 * and BUFB, and the controller raises an interrupt twice per loop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	 * once at the end of BUFA and once at the end of BUFB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 */
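^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 * Example (illustrative numbers, assuming the 4-byte DMA word):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 * buf_len = 8192 with period_len = 4096 passes the check below;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 * ylen then becomes 8192 / 4 - 1 = 2047 words and BUFA/BUFB span
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 * 4096 bytes each, so an interrupt fires at each half of the loop.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	 */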
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	if (buf_len != 2 * period_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	/* Get free descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 	spin_lock_irqsave(&schan->lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	if (!list_empty(&schan->free)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 			node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 		list_del(&sdesc->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	spin_unlock_irqrestore(&schan->lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 	if (!sdesc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	/* Place descriptor in prepared list */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	spin_lock_irqsave(&schan->lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 	sdesc->addr = addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	sdesc->cyclic = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 	sdesc->xlen = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	sdesc->width = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	list_add_tail(&sdesc->node, &schan->prepared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	spin_unlock_irqrestore(&schan->lock, iflags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	return &sdesc->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805)  * The DMA controller consists of 16 independent DMA channels.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806)  * Each channel is allocated to a different function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	unsigned int ch_nr = (unsigned int)(unsigned long)chan_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (ch_nr == chan->chan_id +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) EXPORT_SYMBOL(sirfsoc_dma_filter_id);
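^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  * Minimal usage sketch (hypothetical client code; the channel number 12
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  * is arbitrary) for requesting a channel through this filter via the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  * generic dmaengine API:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *	dma_cap_mask_t mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *	struct dma_chan *chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *	dma_cap_zero(mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *	dma_cap_set(DMA_SLAVE, mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818)  */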
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) #define SIRFSOC_DMA_BUSWIDTHS \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 	struct of_dma *ofdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 	struct sirfsoc_dma *sdma = ofdma->of_dma_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	unsigned int request = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	if (request >= SIRFSOC_DMA_CHANNELS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	return dma_get_slave_channel(&sdma->channels[request].chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) }
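^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * Hedged device-tree sketch (node names and channel numbers are purely
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * illustrative): the single specifier cell consumed above selects the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  * channel, so a client node would reference the controller as e.g.:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  *	uart2: serial {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  *		dmas = <&dmac0 4>, <&dmac0 5>;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  *		dma-names = "rx", "tx";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  *	};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837)  */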
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) static int sirfsoc_dma_probe(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	struct device_node *dn = op->dev.of_node;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	struct device *dev = &op->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	struct dma_device *dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 	struct sirfsoc_dma *sdma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	struct sirfsoc_dma_chan *schan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	struct sirfsoc_dmadata *data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	struct resource res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	ulong regs_start, regs_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 	u32 id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	if (!sdma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	data = (struct sirfsoc_dmadata *)of_device_get_match_data(&op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	sdma->exec_desc = data->exec;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 	sdma->type = data->type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 	if (of_property_read_u32(dn, "cell-index", &id)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		dev_err(dev, "Failed to get DMAC index\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	sdma->irq = irq_of_parse_and_map(dn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	if (!sdma->irq) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		dev_err(dev, "Error mapping IRQ!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	sdma->clk = devm_clk_get(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 	if (IS_ERR(sdma->clk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		dev_err(dev, "failed to get a clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		return PTR_ERR(sdma->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	ret = of_address_to_resource(dn, 0, &res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		dev_err(dev, "Error parsing memory region!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		goto irq_dispose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 	regs_start = res.start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 	regs_size = resource_size(&res);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 	sdma->base = devm_ioremap(dev, regs_start, regs_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 	if (!sdma->base) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 		dev_err(dev, "Error mapping memory region!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		goto irq_dispose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 		dev_err(dev, "Error requesting IRQ!\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 		goto irq_dispose;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 	dma = &sdma->dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 	dma->dev = dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 	dma->device_issue_pending = sirfsoc_dma_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 	dma->device_config = sirfsoc_dma_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	dma->device_pause = sirfsoc_dma_pause_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	dma->device_resume = sirfsoc_dma_resume_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	dma->device_terminate_all = sirfsoc_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	dma->device_tx_status = sirfsoc_dma_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 	dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 	INIT_LIST_HEAD(&dma->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	dma_cap_set(DMA_SLAVE, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 
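^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * Per-channel descriptor lists, in rough lifecycle order (a sketch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * of the usual dmaengine flow): free -> prepared (prep_* above) ->
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * queued (tx_submit) -> active (hardware running) -> completed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 * (IRQ/tasklet), after which descriptors return to the free list.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	 */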
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 		schan = &sdma->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 		schan->chan.device = dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		dma_cookie_init(&schan->chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		INIT_LIST_HEAD(&schan->free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		INIT_LIST_HEAD(&schan->prepared);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		INIT_LIST_HEAD(&schan->queued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 		INIT_LIST_HEAD(&schan->active);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 		INIT_LIST_HEAD(&schan->completed);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 		spin_lock_init(&schan->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 		list_add_tail(&schan->chan.device_node, &dma->channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	tasklet_setup(&sdma->tasklet, sirfsoc_dma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	/* Register DMA engine */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	dev_set_drvdata(dev, sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	ret = dma_async_device_register(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		goto free_irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	/* Device-tree DMA controller registration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 	ret = of_dma_controller_register(dn, of_dma_sirfsoc_xlate, sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 		dev_err(dev, "failed to register DMA controller\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 		goto unreg_dma_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	pm_runtime_enable(&op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	dev_info(dev, "initialized SIRFSOC DMAC driver\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) unreg_dma_dev:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	dma_async_device_unregister(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) free_irq:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	free_irq(sdma->irq, sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) irq_dispose:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 	irq_dispose_mapping(sdma->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) static int sirfsoc_dma_remove(struct platform_device *op)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	struct device *dev = &op->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 	of_dma_controller_free(op->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	dma_async_device_unregister(&sdma->dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	free_irq(sdma->irq, sdma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	tasklet_kill(&sdma->tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	irq_dispose_mapping(sdma->irq);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	pm_runtime_disable(&op->dev);
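^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 * pm_runtime_disable() does not invoke the runtime-suspend hook,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 * so if the device was still runtime-active, drop the clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 * reference manually below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	 */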
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	if (!pm_runtime_status_suspended(&op->dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		sirfsoc_dma_runtime_suspend(&op->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) static int __maybe_unused sirfsoc_dma_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 	clk_disable_unprepare(sdma->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) static int __maybe_unused sirfsoc_dma_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 	ret = clk_prepare_enable(sdma->clk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		dev_err(dev, "clk_prepare_enable failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) static int __maybe_unused sirfsoc_dma_pm_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 	struct sirfsoc_dma_regs *save = &sdma->regs_save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 	struct sirfsoc_dma_chan *schan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 	int ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 	u32 int_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	 * If we were runtime-suspended before, resume to enable the clock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	 * before accessing registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (pm_runtime_status_suspended(dev)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		ret = sirfsoc_dma_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 			return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		count = SIRFSOC_DMA_CHANNELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		int_offset = SIRFSOC_DMA_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	 * The DMA controller loses all register contents while suspended,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 * so the registers of active channels must be saved here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 */
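^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * Only the per-channel CTRL register needs saving: ADDR, XLEN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * YLEN and WIDTH are reprogrammed from the first active
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 * descriptor in sirfsoc_dma_pm_resume() below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 */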
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	for (ch = 0; ch < count; ch++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		schan = &sdma->channels[ch];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (list_empty(&schan->active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 		save->ctrl[ch] = readl_relaxed(sdma->base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 	save->interrupt_en = readl_relaxed(sdma->base + int_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	/* Disable clock */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	sirfsoc_dma_runtime_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) static int __maybe_unused sirfsoc_dma_pm_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	struct sirfsoc_dma_regs *save = &sdma->regs_save;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	struct sirfsoc_dma_desc *sdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 	struct sirfsoc_dma_chan *schan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	int ch;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 	u32 int_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	u32 width_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	/* Enable the clock before accessing registers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	ret = sirfsoc_dma_runtime_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 		count = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 		int_offset = SIRFSOC_DMA_INT_EN_ATLAS7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 		width_offset = SIRFSOC_DMA_WIDTH_ATLAS7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 		count = SIRFSOC_DMA_CHANNELS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 		int_offset = SIRFSOC_DMA_INT_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		width_offset = SIRFSOC_DMA_WIDTH_0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 	writel_relaxed(save->interrupt_en, sdma->base + int_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	for (ch = 0; ch < count; ch++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 		schan = &sdma->channels[ch];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		if (list_empty(&schan->active))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 		sdesc = list_first_entry(&schan->active,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 			struct sirfsoc_dma_desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		writel_relaxed(sdesc->width,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 			sdma->base + width_offset + ch * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 		writel_relaxed(sdesc->xlen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_XLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		writel_relaxed(sdesc->ylen,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_YLEN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		writel_relaxed(save->ctrl[ch],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 			sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_CTRL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 		if (sdma->type == SIRFSOC_DMA_VER_A7V2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) 			writel_relaxed(sdesc->addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 				sdma->base + SIRFSOC_DMA_CH_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 			writel_relaxed(sdesc->addr >> 2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 				sdma->base + ch * 0x10 + SIRFSOC_DMA_CH_ADDR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	/* if we were runtime-suspended before, suspend again */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	if (pm_runtime_status_suspended(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		sirfsoc_dma_runtime_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 			   sirfsoc_dma_runtime_resume, NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_dma_pm_suspend, sirfsoc_dma_pm_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static struct sirfsoc_dmadata sirfsoc_dmadata_a6 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	.exec = sirfsoc_dma_execute_hw_a6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	.type = SIRFSOC_DMA_VER_A6,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) static struct sirfsoc_dmadata sirfsoc_dmadata_a7v1 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 	.exec = sirfsoc_dma_execute_hw_a7v1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 	.type = SIRFSOC_DMA_VER_A7V1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) static struct sirfsoc_dmadata sirfsoc_dmadata_a7v2 = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	.exec = sirfsoc_dma_execute_hw_a7v2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	.type = SIRFSOC_DMA_VER_A7V2,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) static const struct of_device_id sirfsoc_dma_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 	{ .compatible = "sirf,prima2-dmac", .data = &sirfsoc_dmadata_a6 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 	{ .compatible = "sirf,atlas7-dmac", .data = &sirfsoc_dmadata_a7v1 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	{ .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2 },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	{},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) MODULE_DEVICE_TABLE(of, sirfsoc_dma_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static struct platform_driver sirfsoc_dma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 	.probe		= sirfsoc_dma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 	.remove		= sirfsoc_dma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 		.name = DRV_NAME,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 		.pm = &sirfsoc_dma_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 		.of_match_table	= sirfsoc_dma_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) static __init int sirfsoc_dma_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	return platform_driver_register(&sirfsoc_dma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static void __exit sirfsoc_dma_exit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 	platform_driver_unregister(&sirfsoc_dma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) subsys_initcall(sirfsoc_dma_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) module_exit(sirfsoc_dma_exit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) MODULE_DESCRIPTION("SIRFSOC DMA control driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) MODULE_LICENSE("GPL v2");