Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    3)  * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    5) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    6)  * QCOM BAM DMA engine driver
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    7)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    8)  * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300    9)  * peripherals on the MSM 8x74.  The configuration of the channels are dependent
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   10)  * on the way they are hard wired to that specific peripheral.  The peripheral
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   11)  * device tree entries specify the configuration of each channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   13)  * The DMA controller requires the use of external memory for storage of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   14)  * hardware descriptors for each channel.  The descriptor FIFO is accessed as a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   15)  * circular buffer and operations are managed according to the offset within the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   16)  * FIFO.  After pipe/channel reset, all of the pipe registers and internal state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   17)  * are back to defaults.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   18)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   19)  * During DMA operations, we write descriptors to the FIFO, being careful to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   20)  * handle wrapping and then write the last FIFO offset to that channel's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   21)  * P_EVNT_REG register to kick off the transaction.  The P_SW_OFSTS register
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   22)  * indicates the current FIFO offset that is being processed, so there is some
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   23)  * indication of where the hardware is currently working.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   24)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   25) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   26) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   27) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   28) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   29) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   30) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   31) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   32) #include <linux/dma-mapping.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   33) #include <linux/scatterlist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   34) #include <linux/device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   35) #include <linux/platform_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   36) #include <linux/of.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   37) #include <linux/of_address.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   38) #include <linux/of_irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   39) #include <linux/of_dma.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   40) #include <linux/circ_buf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   41) #include <linux/clk.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   42) #include <linux/dmaengine.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   43) #include <linux/pm_runtime.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   44) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   45) #include "../dmaengine.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   46) #include "../virt-dma.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   47) 
/*
 * struct bam_desc_hw - one hardware descriptor as laid out in the
 * per-channel descriptor FIFO consumed by the BAM block.
 * All fields are little-endian, as required by the hardware.
 */
struct bam_desc_hw {
	__le32 addr;		/* Buffer physical address */
	__le16 size;		/* Buffer size in bytes */
	__le16 flags;		/* DESC_FLAG_* bits (INT/EOT/EOB/NWD/CMD) */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   53) 
/* runtime-PM autosuspend delay (pm_runtime API units, i.e. milliseconds) */
#define BAM_DMA_AUTOSUSPEND_DELAY 100

/* bam_desc_hw.flags bits */
#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   61) 
/*
 * struct bam_async_desc - software descriptor for one BAM transaction,
 * with its variable-length array of hardware descriptors appended.
 */
struct bam_async_desc {
	struct virt_dma_desc vd;	/* virt-dma descriptor bookkeeping */

	u32 num_desc;	/* number of hardware descriptors in desc[] */
	u32 xfer_len;	/* NOTE(review): looks like the count currently in
			 * flight on the hardware -- confirm against users */

	/* transaction flags, EOT|EOB|NWD */
	u16 flags;

	/* next hw descriptor to be pushed to the FIFO -- TODO confirm */
	struct bam_desc_hw *curr_desc;

	/* list node for the desc in the bam_chan list of descriptors */
	struct list_head desc_node;
	enum dma_transfer_direction dir;	/* DMA_MEM_TO_DEV or DMA_DEV_TO_MEM */
	size_t length;				/* total transfer length in bytes */
	struct bam_desc_hw desc[];		/* trailing hw descriptors */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   79) 
/*
 * enum bam_reg - logical identifiers for the BAM registers
 *
 * These index the per-hardware-version reg_offset_data layout tables
 * (see bam_addr()).  The BAM_P_* entries are per-pipe registers; the
 * remaining entries are top-level controller registers.
 */
enum bam_reg {
	BAM_CTRL,
	BAM_REVISION,
	BAM_NUM_PIPES,
	BAM_DESC_CNT_TRSHLD,
	BAM_IRQ_SRCS,
	BAM_IRQ_SRCS_MSK,
	BAM_IRQ_SRCS_UNMASKED,
	BAM_IRQ_STTS,
	BAM_IRQ_CLR,
	BAM_IRQ_EN,
	BAM_CNFG_BITS,
	BAM_IRQ_SRCS_EE,
	BAM_IRQ_SRCS_MSK_EE,
	BAM_P_CTRL,
	BAM_P_RST,
	BAM_P_HALT,
	BAM_P_IRQ_STTS,
	BAM_P_IRQ_CLR,
	BAM_P_IRQ_EN,
	BAM_P_EVNT_DEST_ADDR,
	BAM_P_EVNT_REG,
	BAM_P_SW_OFSTS,
	BAM_P_DATA_FIFO_ADDR,
	BAM_P_DESC_FIFO_ADDR,
	BAM_P_EVNT_GEN_TRSHLD,
	BAM_P_FIFO_SIZES,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  108) 
/*
 * struct reg_offset_data - address computation data for one bam_reg
 *
 * The final register address is:
 *   base_offset + pipe_mult * pipe + evnt_mult * pipe + ee_mult * ee
 * (see bam_addr()).
 */
struct reg_offset_data {
	u32 base_offset;			/* offset from start of BAM register space */
	unsigned int pipe_mult, evnt_mult, ee_mult;	/* per-pipe, per-event and per-EE strides */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  113) 
/* Register layout for BAM hardware v1.3, indexed by enum bam_reg */
static const struct reg_offset_data bam_v1_3_reg_info[] = {
	[BAM_CTRL]		= { 0x0F80, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0F84, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x0FBC, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0F88, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x0F8C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0F90, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0FB0, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0F94, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0F98, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0F9C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0FFC, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x1800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x1804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x0000, 0x80, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x0004, 0x80, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x0008, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x0010, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x0014, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x0018, 0x80, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x102C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1018, 0x00, 0x40, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1000, 0x00, 0x40, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1024, 0x00, 0x40, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x101C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1028, 0x00, 0x40, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1020, 0x00, 0x40, 0x00 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  142) 
/* Register layout for BAM hardware v1.4, indexed by enum bam_reg */
static const struct reg_offset_data bam_v1_4_reg_info[] = {
	[BAM_CTRL]		= { 0x0000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x0004, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x003C, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x0008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x000C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x0010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x0030, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x0014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x0018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x0800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x0804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL]		= { 0x1000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x1004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x1008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x1010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x1014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x1018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x182C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x1818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x1800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x1824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x181C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x1828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x1820, 0x00, 0x1000, 0x00 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  171) 
/* Register layout for BAM hardware v1.7, indexed by enum bam_reg */
static const struct reg_offset_data bam_v1_7_reg_info[] = {
	[BAM_CTRL]		= { 0x00000, 0x00, 0x00, 0x00 },
	[BAM_REVISION]		= { 0x01000, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES]		= { 0x01008, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD]	= { 0x00008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS]		= { 0x03010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK]	= { 0x03014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED]	= { 0x03018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS]		= { 0x00014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR]		= { 0x00018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN]		= { 0x0001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS]		= { 0x0007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE]	= { 0x03000, 0x00, 0x00, 0x1000 },
	[BAM_IRQ_SRCS_MSK_EE]	= { 0x03004, 0x00, 0x00, 0x1000 },
	[BAM_P_CTRL]		= { 0x13000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST]		= { 0x13004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT]		= { 0x13008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS]	= { 0x13010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR]		= { 0x13014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN]		= { 0x13018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR]	= { 0x1382C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG]	= { 0x13818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS]	= { 0x13800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR]	= { 0x13824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR]	= { 0x1381C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD]	= { 0x13828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES]	= { 0x13820, 0x00, 0x1000, 0x00 },
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  200) 
/* BAM_CTRL register bits */
#define BAM_SW_RST			BIT(0)
#define BAM_EN				BIT(1)
#define BAM_EN_ACCUM			BIT(4)
#define BAM_TESTBUS_SEL_SHIFT		5
#define BAM_TESTBUS_SEL_MASK		0x3F
#define BAM_DESC_CACHE_SEL_SHIFT	13
#define BAM_DESC_CACHE_SEL_MASK		0x3
#define BAM_CACHED_DESC_STORE		BIT(15)
#define IBC_DISABLE			BIT(16)

/* BAM_REVISION register fields */
#define REVISION_SHIFT		0
#define REVISION_MASK		0xFF
#define NUM_EES_SHIFT		8
#define NUM_EES_MASK		0xF
#define CE_BUFFER_SIZE		BIT(13)
#define AXI_ACTIVE		BIT(14)
#define USE_VMIDMT		BIT(15)
#define SECURED			BIT(16)
#define BAM_HAS_NO_BYPASS	BIT(17)
#define HIGH_FREQUENCY_BAM	BIT(18)
#define INACTIV_TMRS_EXST	BIT(19)
#define NUM_INACTIV_TMRS	BIT(20)
#define DESC_CACHE_DEPTH_SHIFT	21
#define DESC_CACHE_DEPTH_1	(0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2	(1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3	(2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4	(3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN		BIT(23)
#define INACTIV_TMR_BASE_SHIFT	24
#define INACTIV_TMR_BASE_MASK	0xFF

/* BAM_NUM_PIPES register fields */
#define BAM_NUM_PIPES_SHIFT		0
#define BAM_NUM_PIPES_MASK		0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT	16
#define PERIPH_NON_PIP_GRP_MASK		0xFF
#define BAM_NON_PIPE_GRP_SHIFT		24
#define BAM_NON_PIPE_GRP_MASK		0xFF

/* BAM_CNFG_BITS register bits */
#define BAM_PIPE_CNFG		BIT(2)
#define BAM_FULL_PIPE		BIT(11)
#define BAM_NO_EXT_P_RST	BIT(12)
#define BAM_IBC_DISABLE		BIT(13)
#define BAM_SB_CLK_REQ		BIT(14)
#define BAM_PSM_CSW_REQ		BIT(15)
#define BAM_PSM_P_RES		BIT(16)
#define BAM_AU_P_RES		BIT(17)
#define BAM_SI_P_RES		BIT(18)
#define BAM_WB_P_RES		BIT(19)
#define BAM_WB_BLK_CSW		BIT(20)
#define BAM_WB_CSW_ACK_IDL	BIT(21)
#define BAM_WB_RETR_SVPNT	BIT(22)
#define BAM_WB_DSC_AVL_P_RST	BIT(23)
#define BAM_REG_P_EN		BIT(24)
#define BAM_PSM_P_HD_DATA	BIT(25)
#define BAM_AU_ACCUMED		BIT(26)
#define BAM_CMD_ENABLE		BIT(27)

/* default value for the BAM_CNFG_BITS register */
#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
				 BAM_NO_EXT_P_RST |	\
				 BAM_IBC_DISABLE |	\
				 BAM_SB_CLK_REQ |	\
				 BAM_PSM_CSW_REQ |	\
				 BAM_PSM_P_RES |	\
				 BAM_AU_P_RES |		\
				 BAM_SI_P_RES |		\
				 BAM_WB_P_RES |		\
				 BAM_WB_BLK_CSW |	\
				 BAM_WB_CSW_ACK_IDL |	\
				 BAM_WB_RETR_SVPNT |	\
				 BAM_WB_DSC_AVL_P_RST |	\
				 BAM_REG_P_EN |		\
				 BAM_PSM_P_HD_DATA |	\
				 BAM_AU_ACCUMED |	\
				 BAM_CMD_ENABLE)

/* BAM_P_CTRL (per-pipe control) register bits */
#define P_EN			BIT(1)
#define P_DIRECTION		BIT(3)
#define P_SYS_STRM		BIT(4)
#define P_SYS_MODE		BIT(5)
#define P_AUTO_EOB		BIT(6)
#define P_AUTO_EOB_SEL_SHIFT	7
#define P_AUTO_EOB_SEL_512	(0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256	(1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128	(2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64	(3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT	9
#define P_PREFETCH_LIMIT_32	(0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16	(1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4	(2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD		BIT(11)
#define P_LOCK_GROUP_SHIFT	16
#define P_LOCK_GROUP_MASK	0x1F

/* BAM_DESC_CNT_TRSHLD register fields */
#define CNT_TRSHLD		0xffff
#define DEFAULT_CNT_THRSHLD	0x4

/* BAM_IRQ_SRCS register bits */
#define BAM_IRQ			BIT(31)
#define P_IRQ			0x7fffffff

/* BAM_IRQ_SRCS_MSK register bits */
#define BAM_IRQ_MSK		BAM_IRQ
#define P_IRQ_MSK		P_IRQ

/* BAM_IRQ_STTS register bits */
#define BAM_TIMER_IRQ		BIT(4)
#define BAM_EMPTY_IRQ		BIT(3)
#define BAM_ERROR_IRQ		BIT(2)
#define BAM_HRESP_ERR_IRQ	BIT(1)

/* BAM_IRQ_CLR register bits */
#define BAM_TIMER_CLR		BIT(4)
#define BAM_EMPTY_CLR		BIT(3)
#define BAM_ERROR_CLR		BIT(2)
#define BAM_HRESP_ERR_CLR	BIT(1)

/* BAM_IRQ_EN register bits */
#define BAM_TIMER_EN		BIT(4)
#define BAM_EMPTY_EN		BIT(3)
#define BAM_ERROR_EN		BIT(2)
#define BAM_HRESP_ERR_EN	BIT(1)

/* BAM_P_IRQ_EN (per-pipe interrupt enable) register bits */
#define P_PRCSD_DESC_EN		BIT(0)
#define P_TIMER_EN		BIT(1)
#define P_WAKE_EN		BIT(2)
#define P_OUT_OF_DESC_EN	BIT(3)
#define P_ERR_EN		BIT(4)
#define P_TRNSFR_END_EN		BIT(5)
#define P_DEFAULT_IRQS_EN	(P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  337) 
/* BAM_P_SW_OFSTS: low 16 bits hold the FIFO offset currently processed */
#define P_SW_OFSTS_MASK		0xffff

#define BAM_DESC_FIFO_SIZE	SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_FIFO_SIZE	(SZ_32K - 8)
/*
 * True when the channel's circular descriptor FIFO has no free space.
 * Fixed to use the macro argument rather than silently capturing a
 * 'bchan' variable from the caller's scope (callers pass their bchan,
 * so behavior is unchanged, but the macro is now hygienic).
 */
#define IS_BUSY(chan)	(CIRC_SPACE((chan)->tail, (chan)->head,	\
			 MAX_DESCRIPTORS + 1) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  346) 
/*
 * struct bam_chan - per-channel (pipe) state for the BAM DMA engine
 */
struct bam_chan {
	struct virt_dma_chan vc;	/* virt-dma channel bookkeeping */

	struct bam_device *bdev;	/* owning BAM controller */

	/* configuration from device tree */
	u32 id;				/* pipe number, used to address BAM_P_* registers */

	/* runtime configuration */
	struct dma_slave_config slave;

	/* fifo storage */
	struct bam_desc_hw *fifo_virt;	/* CPU address of the descriptor FIFO */
	dma_addr_t fifo_phys;		/* device address of the descriptor FIFO */

	/* fifo markers */
	unsigned short head;		/* start of active descriptor entries */
	unsigned short tail;		/* end of active descriptor entries */

	unsigned int initialized;	/* is the channel hw initialized? */
	unsigned int paused;		/* is the channel paused? */
	unsigned int reconfigure;	/* new slave config? */
	/* list of descriptors currently processed */
	struct list_head desc_list;

	/* NOTE(review): presumably links this channel into a device-level
	 * list -- confirm against the code that iterates it */
	struct list_head node;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  375) static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  376) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  377) 	return container_of(common, struct bam_chan, vc.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  378) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  379) 
/*
 * struct bam_device - per-controller state for one BAM DMA block
 */
struct bam_device {
	void __iomem *regs;		/* mapped BAM register space */
	struct device *dev;
	struct dma_device common;	/* dmaengine device exposed to clients */
	struct bam_chan *channels;	/* array of num_channels channels */
	u32 num_channels;
	u32 num_ees;			/* number of execution environments */

	/* execution environment ID, from DT */
	u32 ee;
	/* NOTE(review): presumably set when the BAM is owned/initialized by
	 * a remote processor -- confirm against probe/init code */
	bool controlled_remotely;

	/* register layout table for this hardware version (bam_v1_*_reg_info) */
	const struct reg_offset_data *layout;

	struct clk *bamclk;
	int irq;

	/* dma start transaction tasklet */
	struct tasklet_struct task;
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  400) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  401) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  402)  * bam_addr - returns BAM register address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  403)  * @bdev: bam device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  404)  * @pipe: pipe instance (ignored when register doesn't have multiple instances)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  405)  * @reg:  register enum
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  406)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  407) static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  408) 		enum bam_reg reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  409) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  410) 	const struct reg_offset_data r = bdev->layout[reg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  411) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  412) 	return bdev->regs + r.base_offset +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  413) 		r.pipe_mult * pipe +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  414) 		r.evnt_mult * pipe +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  415) 		r.ee_mult * bdev->ee;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  418) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  419)  * bam_reset_channel - Reset individual BAM DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  420)  * @bchan: bam channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  421)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  422)  * This function resets a specific BAM channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  423)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  424) static void bam_reset_channel(struct bam_chan *bchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  426) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  427) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  428) 	lockdep_assert_held(&bchan->vc.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  429) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  430) 	/* reset channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  431) 	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  432) 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  433) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  434) 	/* don't allow cpu to reorder BAM register accesses done after this */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  435) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  437) 	/* make sure hw is initialized when channel is used the first time  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  438) 	bchan->initialized = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  441) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  442)  * bam_chan_init_hw - Initialize channel hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  443)  * @bchan: bam channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  444)  * @dir: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  445)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  446)  * This function resets and initializes the BAM channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  447)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  448) static void bam_chan_init_hw(struct bam_chan *bchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  449) 	enum dma_transfer_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  450) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  451) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  452) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  453) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  454) 	/* Reset the channel to clear internal state of the FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  455) 	bam_reset_channel(bchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  456) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  457) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  458) 	 * write out 8 byte aligned address.  We have enough space for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  459) 	 * because we allocated 1 more descriptor (8 bytes) than we can use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  460) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  461) 	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  462) 			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  463) 	writel_relaxed(BAM_FIFO_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  464) 			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  465) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  466) 	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  467) 	writel_relaxed(P_DEFAULT_IRQS_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  468) 			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  470) 	/* unmask the specific pipe and EE combo */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  471) 	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  472) 	val |= BIT(bchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  473) 	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  475) 	/* don't allow cpu to reorder the channel enable done below */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  476) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  478) 	/* set fixed direction and mode, then enable channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  479) 	val = P_EN | P_SYS_MODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  480) 	if (dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  481) 		val |= P_DIRECTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  482) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  483) 	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  484) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  485) 	bchan->initialized = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  486) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  487) 	/* init FIFO pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  488) 	bchan->head = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  489) 	bchan->tail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  490) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  492) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  493)  * bam_alloc_chan - Allocate channel resources for DMA channel.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  494)  * @chan: specified channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  495)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  496)  * This function allocates the FIFO descriptor memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  497)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  498) static int bam_alloc_chan(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  500) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  501) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  502) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  503) 	if (bchan->fifo_virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  504) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  505) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  506) 	/* allocate FIFO descriptor space, but only if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  507) 	bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  508) 					&bchan->fifo_phys, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  509) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  510) 	if (!bchan->fifo_virt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  511) 		dev_err(bdev->dev, "Failed to allocate desc fifo\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  512) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  513) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  514) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  515) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  516) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  517) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  518) static int bam_pm_runtime_get_sync(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  520) 	if (pm_runtime_enabled(dev))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  521) 		return pm_runtime_get_sync(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  522) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  523) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  524) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  525) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  526) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  527)  * bam_free_chan - Frees dma resources associated with specific channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  528)  * @chan: specified channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  529)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  530)  * Free the allocated fifo descriptor memory and channel resources
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  531)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  532)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  533) static void bam_free_chan(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  534) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  535) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  536) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  537) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  538) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  539) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  540) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  541) 	ret = bam_pm_runtime_get_sync(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  542) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  543) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  544) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  545) 	vchan_free_chan_resources(to_virt_chan(chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  546) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  547) 	if (!list_empty(&bchan->desc_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  548) 		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  549) 		goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  550) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  551) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  552) 	spin_lock_irqsave(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  553) 	bam_reset_channel(bchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  554) 	spin_unlock_irqrestore(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  555) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  556) 	dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  557) 		    bchan->fifo_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  558) 	bchan->fifo_virt = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  559) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  560) 	/* mask irq for pipe/channel */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  561) 	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  562) 	val &= ~BIT(bchan->id);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  563) 	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  564) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  565) 	/* disable irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  566) 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  567) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  568) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  569) 	pm_runtime_mark_last_busy(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  570) 	pm_runtime_put_autosuspend(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  571) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  572) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  573) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  574)  * bam_slave_config - set slave configuration for channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  575)  * @chan: dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  576)  * @cfg: slave configuration
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  577)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  578)  * Sets slave configuration for channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  579)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  580)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  581) static int bam_slave_config(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  582) 			    struct dma_slave_config *cfg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  583) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  584) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  585) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  586) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  587) 	spin_lock_irqsave(&bchan->vc.lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  588) 	memcpy(&bchan->slave, cfg, sizeof(*cfg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  589) 	bchan->reconfigure = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  590) 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  591) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  592) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  593) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  594) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  595) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  596)  * bam_prep_slave_sg - Prep slave sg transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  597)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  598)  * @chan: dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  599)  * @sgl: scatter gather list
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  600)  * @sg_len: length of sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  601)  * @direction: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  602)  * @flags: DMA flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  603)  * @context: transfer context (unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  604)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  605) static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  606) 	struct scatterlist *sgl, unsigned int sg_len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  607) 	enum dma_transfer_direction direction, unsigned long flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  608) 	void *context)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  609) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  610) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  611) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  612) 	struct bam_async_desc *async_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  613) 	struct scatterlist *sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  614) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  615) 	struct bam_desc_hw *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  616) 	unsigned int num_alloc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  618) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  619) 	if (!is_slave_direction(direction)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  620) 		dev_err(bdev->dev, "invalid dma direction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  621) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  622) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  623) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  624) 	/* calculate number of required entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  625) 	for_each_sg(sgl, sg, sg_len, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  626) 		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  627) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  628) 	/* allocate enough room to accomodate the number of entries */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  629) 	async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  630) 			     GFP_NOWAIT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  631) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  632) 	if (!async_desc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  633) 		goto err_out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  635) 	if (flags & DMA_PREP_FENCE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  636) 		async_desc->flags |= DESC_FLAG_NWD;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  637) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  638) 	if (flags & DMA_PREP_INTERRUPT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  639) 		async_desc->flags |= DESC_FLAG_EOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  640) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  641) 	async_desc->num_desc = num_alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  642) 	async_desc->curr_desc = async_desc->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  643) 	async_desc->dir = direction;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  645) 	/* fill in temporary descriptors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  646) 	desc = async_desc->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  647) 	for_each_sg(sgl, sg, sg_len, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  648) 		unsigned int remainder = sg_dma_len(sg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  649) 		unsigned int curr_offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  651) 		do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  652) 			if (flags & DMA_PREP_CMD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  653) 				desc->flags |= cpu_to_le16(DESC_FLAG_CMD);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  655) 			desc->addr = cpu_to_le32(sg_dma_address(sg) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  656) 						 curr_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  657) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  658) 			if (remainder > BAM_FIFO_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  659) 				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  660) 				remainder -= BAM_FIFO_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  661) 				curr_offset += BAM_FIFO_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  662) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  663) 				desc->size = cpu_to_le16(remainder);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  664) 				remainder = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  665) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  666) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  667) 			async_desc->length += le16_to_cpu(desc->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  668) 			desc++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  669) 		} while (remainder > 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  670) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  671) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  672) 	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  673) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  674) err_out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  675) 	kfree(async_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  676) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  678) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  679) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  680)  * bam_dma_terminate_all - terminate all transactions on a channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  681)  * @chan: bam dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  682)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  683)  * Dequeues and frees all transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  684)  * No callbacks are done
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  685)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  686)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  687) static int bam_dma_terminate_all(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  688) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  689) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  690) 	struct bam_async_desc *async_desc, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  691) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  692) 	LIST_HEAD(head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  693) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  694) 	/* remove all transactions, including active transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  695) 	spin_lock_irqsave(&bchan->vc.lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  696) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  697) 	 * If we have transactions queued, then some might be committed to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  698) 	 * hardware in the desc fifo.  The only way to reset the desc fifo is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  699) 	 * to do a hardware reset (either by pipe or the entire block).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  700) 	 * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  701) 	 * pipe.  If the pipe is left disabled (default state after pipe reset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  702) 	 * and is accessed by a connected hardware engine, a fatal error in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  703) 	 * the BAM will occur.  There is a small window where this could happen
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  704) 	 * with bam_chan_init_hw(), but it is assumed that the caller has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  705) 	 * stopped activity on any attached hardware engine.  Make sure to do
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  706) 	 * this first so that the BAM hardware doesn't cause memory corruption
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  707) 	 * by accessing freed resources.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  708) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  709) 	if (!list_empty(&bchan->desc_list)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  710) 		async_desc = list_first_entry(&bchan->desc_list,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  711) 					      struct bam_async_desc, desc_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  712) 		bam_chan_init_hw(bchan, async_desc->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  713) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  714) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  715) 	list_for_each_entry_safe(async_desc, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  716) 				 &bchan->desc_list, desc_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) 		list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) 		list_del(&async_desc->desc_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	vchan_get_all_descriptors(&bchan->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 	vchan_dma_desc_free_list(&bchan->vc, &head);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730)  * bam_pause - Pause DMA channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731)  * @chan: dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) static int bam_pause(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 	ret = bam_pm_runtime_get_sync(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 	spin_lock_irqsave(&bchan->vc.lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	bchan->paused = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	pm_runtime_mark_last_busy(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	pm_runtime_put_autosuspend(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756)  * bam_resume - Resume DMA channel operations
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757)  * @chan: dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) static int bam_resume(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 	unsigned long flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) 	ret = bam_pm_runtime_get_sync(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	spin_lock_irqsave(&bchan->vc.lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 	bchan->paused = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	spin_unlock_irqrestore(&bchan->vc.lock, flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 	pm_runtime_mark_last_busy(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	pm_runtime_put_autosuspend(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782)  * process_channel_irqs - processes the channel interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783)  * @bdev: bam controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785)  * This function processes the channel interrupts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) static u32 process_channel_irqs(struct bam_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 	u32 i, srcs, pipe_stts, offset, avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 	struct bam_async_desc *async_desc, *tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	/* return early if no pipe/channel interrupts are present */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	if (!(srcs & P_IRQ))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 		return srcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	for (i = 0; i < bdev->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 		struct bam_chan *bchan = &bdev->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 		if (!(srcs & BIT(i)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 		/* clear pipe irq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 		spin_lock_irqsave(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 				       P_SW_OFSTS_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 		offset /= sizeof(struct bam_desc_hw);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 		/* Number of bytes available to read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		if (offset < bchan->head)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 			avail--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 		list_for_each_entry_safe(async_desc, tmp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 					 &bchan->desc_list, desc_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 			/* Not enough data to read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 			if (avail < async_desc->xfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			/* manage FIFO */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 			bchan->head += async_desc->xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 			bchan->head %= MAX_DESCRIPTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 			async_desc->num_desc -= async_desc->xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 			async_desc->curr_desc += async_desc->xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 			avail -= async_desc->xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 			 * if complete, process cookie. Otherwise
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 			 * push back to front of desc_issued so that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 			 * it gets restarted by the tasklet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 			if (!async_desc->num_desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 				vchan_cookie_complete(&async_desc->vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 			} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 				list_add(&async_desc->vd.node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 					 &bchan->vc.desc_issued);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			list_del(&async_desc->desc_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		spin_unlock_irqrestore(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	return srcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858)  * bam_dma_irq - irq handler for bam controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859)  * @irq: IRQ of interrupt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860)  * @data: callback data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862)  * IRQ handler for the bam controller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) static irqreturn_t bam_dma_irq(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 	struct bam_device *bdev = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 	u32 clr_mask = 0, srcs = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 	srcs |= process_channel_irqs(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	/* kick off tasklet to start next dma transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 	if (srcs & P_IRQ)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		tasklet_schedule(&bdev->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 	ret = bam_pm_runtime_get_sync(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	if (srcs & BAM_IRQ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 		 * don't allow reorder of the various accesses to the BAM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		 * registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 	pm_runtime_mark_last_busy(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	pm_runtime_put_autosuspend(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * bam_tx_status - returns status of transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * @chan: dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * @cookie: transaction cookie
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  * @txstate: DMA transaction state
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * Return status of dma transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 		struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 	struct bam_async_desc *async_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 	struct virt_dma_desc *vd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 	size_t residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 	ret = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	if (ret == DMA_COMPLETE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	if (!txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 		return bchan->paused ? DMA_PAUSED : ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	spin_lock_irqsave(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	vd = vchan_find_desc(&bchan->vc, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (vd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		residue = container_of(vd, struct bam_async_desc, vd)->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 		list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 			if (async_desc->vd.tx.cookie != cookie)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 				continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 			for (i = 0; i < async_desc->num_desc; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 				residue += le16_to_cpu(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 						async_desc->curr_desc[i].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	spin_unlock_irqrestore(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	dma_set_residue(txstate, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	if (ret == DMA_IN_PROGRESS && bchan->paused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 		ret = DMA_PAUSED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  * bam_apply_new_config
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  * @bchan: bam dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  * @dir: DMA direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) static void bam_apply_new_config(struct bam_chan *bchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	enum dma_transfer_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 	u32 maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	if (!bdev->controlled_remotely) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 		if (dir == DMA_DEV_TO_MEM)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 			maxburst = bchan->slave.src_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 			maxburst = bchan->slave.dst_maxburst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 		writel_relaxed(maxburst,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 			       bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	bchan->reconfigure = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974)  * bam_start_dma - start next transaction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975)  * @bchan: bam dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) static void bam_start_dma(struct bam_chan *bchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 	struct bam_device *bdev = bchan->bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 	struct bam_async_desc *async_desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	struct bam_desc_hw *desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 					sizeof(struct bam_desc_hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	unsigned int avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 	struct dmaengine_desc_callback cb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 	lockdep_assert_held(&bchan->vc.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	if (!vd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	ret = bam_pm_runtime_get_sync(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 	while (vd && !IS_BUSY(bchan)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 		list_del(&vd->node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		async_desc = container_of(vd, struct bam_async_desc, vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		/* on first use, initialize the channel hardware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 		if (!bchan->initialized)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 			bam_chan_init_hw(bchan, async_desc->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 		/* apply new slave config changes, if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		if (bchan->reconfigure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			bam_apply_new_config(bchan, async_desc->dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		desc = async_desc->curr_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		avail = CIRC_SPACE(bchan->tail, bchan->head,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 				   MAX_DESCRIPTORS + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		if (async_desc->num_desc > avail)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 			async_desc->xfer_len = avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 			async_desc->xfer_len = async_desc->num_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		/* set any special flags on the last descriptor */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		if (async_desc->num_desc == async_desc->xfer_len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 			desc[async_desc->xfer_len - 1].flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 						cpu_to_le16(async_desc->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		vd = vchan_next_desc(&bchan->vc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 		dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		 * An interrupt is generated at this desc, if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		 *  - FIFO is FULL.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		 *  - No more descriptors to add.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 		 *  - If a callback completion was requested for this DESC,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		 *     In this case, BAM will deliver the completion callback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		 *     for this desc and continue processing the next desc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		if (((avail <= async_desc->xfer_len) || !vd ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		     dmaengine_desc_callback_valid(&cb)) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		    !(async_desc->flags & DESC_FLAG_EOT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			desc[async_desc->xfer_len - 1].flags |=
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 				cpu_to_le16(DESC_FLAG_INT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 		if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			u32 partial = MAX_DESCRIPTORS - bchan->tail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			memcpy(&fifo[bchan->tail], desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 			       partial * sizeof(struct bam_desc_hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 			memcpy(fifo, &desc[partial],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 			       (async_desc->xfer_len - partial) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 				sizeof(struct bam_desc_hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			memcpy(&fifo[bchan->tail], desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			       async_desc->xfer_len *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 			       sizeof(struct bam_desc_hw));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 		bchan->tail += async_desc->xfer_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 		bchan->tail %= MAX_DESCRIPTORS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 		list_add_tail(&async_desc->desc_node, &bchan->desc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 	/* ensure descriptor writes and dma start not reordered */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	pm_runtime_mark_last_busy(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 	pm_runtime_put_autosuspend(bdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072)  * dma_tasklet - DMA IRQ tasklet
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)  * @t: tasklet argument (bam controller structure)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)  * Sets up next DMA operation and then processes all completed transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) static void dma_tasklet(struct tasklet_struct *t)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	struct bam_device *bdev = from_tasklet(bdev, t, task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 	struct bam_chan *bchan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	unsigned int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 	/* go through the channels and kick off transactions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 	for (i = 0; i < bdev->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		bchan = &bdev->channels[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 		spin_lock_irqsave(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 		if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 			bam_start_dma(bchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 		spin_unlock_irqrestore(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  * bam_issue_pending - starts pending transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * @chan: dma channel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  * Calls tasklet directly which in turn starts any pending transactions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static void bam_issue_pending(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) 	struct bam_chan *bchan = to_bam_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 	spin_lock_irqsave(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) 	/* if work pending and idle, start a transaction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 		bam_start_dma(bchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 	spin_unlock_irqrestore(&bchan->vc.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)  * bam_dma_free_desc - free descriptor memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118)  * @vd: virtual descriptor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) static void bam_dma_free_desc(struct virt_dma_desc *vd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	struct bam_async_desc *async_desc = container_of(vd,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 			struct bam_async_desc, vd);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 	kfree(async_desc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 		struct of_dma *of)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	struct bam_device *bdev = container_of(of->of_dma_data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 					struct bam_device, common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	unsigned int request;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 	if (dma_spec->args_count != 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 	request = dma_spec->args[0];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	if (request >= bdev->num_channels)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)  * bam_init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)  * @bdev: bam device
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150)  * Initialization helper for global bam registers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) static int bam_init(struct bam_device *bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 	u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	/* read revision and configuration information */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 	if (!bdev->num_ees) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	/* check that configured EE is within range */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 	if (bdev->ee >= bdev->num_ees)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 		return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	if (!bdev->num_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 	if (bdev->controlled_remotely)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 	/* s/w reset bam */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 	/* after reset all pipes are disabled and idle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 	val |= BAM_SW_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	val &= ~BAM_SW_RST;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	/* make sure previous stores are visible before enabling BAM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	wmb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	/* enable bam */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	val |= BAM_EN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) 	/* set descriptor threshhold, start with 4 bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 	writel_relaxed(DEFAULT_CNT_THRSHLD,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	/* enable irqs for errors */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 			bam_addr(bdev, 0, BAM_IRQ_EN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	/* unmask global bam interrupt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 	u32 index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	bchan->id = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	bchan->bdev = bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	vchan_init(&bchan->vc, &bdev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 	bchan->vc.desc_free = bam_dma_free_desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	INIT_LIST_HEAD(&bchan->desc_list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) static const struct of_device_id bam_of_match[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	{ .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 	{}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) MODULE_DEVICE_TABLE(of, bam_of_match);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) static int bam_dma_probe(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	struct bam_device *bdev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 	const struct of_device_id *match;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 	struct resource *iores;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	int ret, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	if (!bdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	bdev->dev = &pdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 	match = of_match_node(bam_of_match, pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	if (!match) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		dev_err(&pdev->dev, "Unsupported BAM module\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		return -ENODEV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	bdev->layout = match->data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 	if (IS_ERR(bdev->regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 		return PTR_ERR(bdev->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	bdev->irq = platform_get_irq(pdev, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	if (bdev->irq < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 		return bdev->irq;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 		dev_err(bdev->dev, "Execution environment unspecified\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 						"qcom,controlled-remotely");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 	if (bdev->controlled_remotely) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 					   &bdev->num_channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 			dev_err(bdev->dev, "num-channels unspecified in dt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 					   &bdev->num_ees);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			dev_err(bdev->dev, "num-ees unspecified in dt\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	if (IS_ERR(bdev->bamclk)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 		if (!bdev->controlled_remotely)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 			return PTR_ERR(bdev->bamclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 		bdev->bamclk = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 	ret = clk_prepare_enable(bdev->bamclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 		dev_err(bdev->dev, "failed to prepare/enable clock\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 	ret = bam_init(bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 		goto err_disable_clk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	tasklet_setup(&bdev->task, dma_tasklet);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 				sizeof(*bdev->channels), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	if (!bdev->channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 		ret = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 		goto err_tasklet_kill;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 	/* allocate and initialize channels */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 	INIT_LIST_HEAD(&bdev->common.channels);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 	for (i = 0; i < bdev->num_channels; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 		bam_channel_init(bdev, &bdev->channels[i], i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		goto err_bam_channel_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 	/* set max dma segment size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	bdev->common.dev = bdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		dev_err(bdev->dev, "cannot set maximum segment size\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		goto err_bam_channel_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 	platform_set_drvdata(pdev, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 	/* set capabilities */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 	dma_cap_zero(bdev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 	/* initialize dmaengine apis */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 	bdev->common.device_free_chan_resources = bam_free_chan;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) 	bdev->common.device_config = bam_slave_config;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 	bdev->common.device_pause = bam_pause;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) 	bdev->common.device_resume = bam_resume;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) 	bdev->common.device_terminate_all = bam_dma_terminate_all;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	bdev->common.device_issue_pending = bam_issue_pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) 	bdev->common.device_tx_status = bam_tx_status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 	bdev->common.dev = bdev->dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) 	ret = dma_async_device_register(&bdev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 	if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 		dev_err(bdev->dev, "failed to register dma async device\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 		goto err_bam_channel_exit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 					&bdev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		goto err_unregister_dma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (bdev->controlled_remotely) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		pm_runtime_disable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	pm_runtime_irq_safe(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	pm_runtime_use_autosuspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 	pm_runtime_mark_last_busy(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	pm_runtime_set_active(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 	pm_runtime_enable(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) err_unregister_dma:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	dma_async_device_unregister(&bdev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) err_bam_channel_exit:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 	for (i = 0; i < bdev->num_channels; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		tasklet_kill(&bdev->channels[i].vc.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) err_tasklet_kill:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	tasklet_kill(&bdev->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) err_disable_clk:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 	clk_disable_unprepare(bdev->bamclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) static int bam_dma_remove(struct platform_device *pdev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 	struct bam_device *bdev = platform_get_drvdata(pdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	u32 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 	pm_runtime_force_suspend(&pdev->dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	of_dma_controller_free(pdev->dev.of_node);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 	dma_async_device_unregister(&bdev->common);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	/* mask all interrupts for this execution environment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 	writel_relaxed(0, bam_addr(bdev, 0,  BAM_IRQ_SRCS_MSK_EE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	devm_free_irq(bdev->dev, bdev->irq, bdev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 	for (i = 0; i < bdev->num_channels; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) 		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) 		tasklet_kill(&bdev->channels[i].vc.task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 		if (!bdev->channels[i].fifo_virt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 			    bdev->channels[i].fifo_virt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 			    bdev->channels[i].fifo_phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	tasklet_kill(&bdev->task);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 	clk_disable_unprepare(bdev->bamclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	struct bam_device *bdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 	clk_disable(bdev->bamclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	struct bam_device *bdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 	ret = clk_enable(bdev->bamclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	if (ret < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 		dev_err(dev, "clk_enable failed: %d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) static int __maybe_unused bam_dma_suspend(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	struct bam_device *bdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	if (!bdev->controlled_remotely)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		pm_runtime_force_suspend(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	clk_unprepare(bdev->bamclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) static int __maybe_unused bam_dma_resume(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 	struct bam_device *bdev = dev_get_drvdata(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 	ret = clk_prepare(bdev->bamclk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 	if (!bdev->controlled_remotely)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 		pm_runtime_force_resume(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) static const struct dev_pm_ops bam_dma_pm_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 				NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) static struct platform_driver bam_dma_driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	.probe = bam_dma_probe,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 	.remove = bam_dma_remove,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 	.driver = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 		.name = "bam-dma-engine",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		.pm = &bam_dma_pm_ops,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 		.of_match_table = bam_of_match,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	},
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) module_platform_driver(bam_dma_driver);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) MODULE_LICENSE("GPL v2");