/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright 2013-2014 Freescale Semiconductor, Inc.
 * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
 */
#ifndef _FSL_EDMA_COMMON_H_
#define _FSL_EDMA_COMMON_H_

#include <linux/dma-direction.h>
#include <linux/platform_device.h>
#include "virt-dma.h"

#define EDMA_CR_EDBG		BIT(1)
#define EDMA_CR_ERCA		BIT(2)
#define EDMA_CR_ERGA		BIT(3)
#define EDMA_CR_HOE		BIT(4)
#define EDMA_CR_HALT		BIT(5)
#define EDMA_CR_CLM		BIT(6)
#define EDMA_CR_EMLM		BIT(7)
#define EDMA_CR_ECX		BIT(16)
#define EDMA_CR_CX		BIT(17)

#define EDMA_SEEI_SEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CEEI_CEEI(x)	((x) & GENMASK(4, 0))
#define EDMA_CINT_CINT(x)	((x) & GENMASK(4, 0))
#define EDMA_CERR_CERR(x)	((x) & GENMASK(4, 0))

#define EDMA_TCD_ATTR_DSIZE(x)		((x) & GENMASK(2, 0))
#define EDMA_TCD_ATTR_DMOD(x)		(((x) & GENMASK(4, 0)) << 3)
#define EDMA_TCD_ATTR_SSIZE(x)		(((x) & GENMASK(2, 0)) << 8)
#define EDMA_TCD_ATTR_SMOD(x)		(((x) & GENMASK(4, 0)) << 11)
#define EDMA_TCD_ATTR_DSIZE_8BIT	0
#define EDMA_TCD_ATTR_DSIZE_16BIT	BIT(0)
#define EDMA_TCD_ATTR_DSIZE_32BIT	BIT(1)
#define EDMA_TCD_ATTR_DSIZE_64BIT	(BIT(0) | BIT(1))
#define EDMA_TCD_ATTR_DSIZE_32BYTE	(BIT(2) | BIT(0))
#define EDMA_TCD_ATTR_SSIZE_8BIT	0
#define EDMA_TCD_ATTR_SSIZE_16BIT	(EDMA_TCD_ATTR_DSIZE_16BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BIT	(EDMA_TCD_ATTR_DSIZE_32BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_64BIT	(EDMA_TCD_ATTR_DSIZE_64BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BYTE	(EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
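
/*
 * Illustrative use of the ATTR helpers above (a sketch, not taken from the
 * driver): a transfer using 32-bit source and destination accesses, with no
 * address-modulo wrapping, could build its TCD ATTR field as
 *
 *	attr = EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
 *
 * leaving the SMOD/DMOD fields at zero.
 */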

#define EDMA_TCD_CITER_CITER(x)		((x) & GENMASK(14, 0))
#define EDMA_TCD_BITER_BITER(x)		((x) & GENMASK(14, 0))

#define EDMA_TCD_CSR_START		BIT(0)
#define EDMA_TCD_CSR_INT_MAJOR		BIT(1)
#define EDMA_TCD_CSR_INT_HALF		BIT(2)
#define EDMA_TCD_CSR_D_REQ		BIT(3)
#define EDMA_TCD_CSR_E_SG		BIT(4)
#define EDMA_TCD_CSR_E_LINK		BIT(5)
#define EDMA_TCD_CSR_ACTIVE		BIT(6)
#define EDMA_TCD_CSR_DONE		BIT(7)
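
/*
 * Illustrative combination of the CSR bits above (a sketch, not taken from
 * the driver): the last TCD of a non-cyclic transfer would typically raise
 * a major-loop interrupt and drop the hardware request on completion:
 *
 *	csr = EDMA_TCD_CSR_INT_MAJOR | EDMA_TCD_CSR_D_REQ;
 */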

#define EDMAMUX_CHCFG_DIS		0x0
#define EDMAMUX_CHCFG_ENBL		0x80
#define EDMAMUX_CHCFG_SOURCE(n)		((n) & 0x3F)
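
/*
 * A channel's DMAMUX configuration register is typically programmed with the
 * request source ORed with the enable bit, e.g. (illustrative sketch):
 *
 *	val = EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot);
 *
 * and written back as EDMAMUX_CHCFG_DIS when the channel is released.
 */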

#define DMAMUX_NR	2

#define FSL_EDMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
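
/*
 * FSL_EDMA_BUSWIDTHS is intended for the dmaengine capability fields, e.g.
 * (illustrative sketch, assuming a probe-time fsl_edma pointer):
 *
 *	fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
 *	fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
 */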

enum fsl_edma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};

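/*
 * In-memory image of a hardware TCD (transfer control descriptor). The
 * fields mirror the hardware layout and are kept little-endian in memory
 * (hence the __le types); they are converted as needed when copied into the
 * controller's TCD registers.
 */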
struct fsl_edma_hw_tcd {
	__le32	saddr;
	__le16	soff;
	__le16	attr;
	__le32	nbytes;
	__le32	slast;
	__le32	daddr;
	__le16	doff;
	__le16	citer;
	__le32	dlast_sga;
	__le16	csr;
	__le16	biter;
};

/*
 * Register pointers (__iomem), valid for both the v32 and v64
 * register layouts.
 */
struct edma_regs {
	void __iomem *cr;
	void __iomem *es;
	void __iomem *erqh;
	void __iomem *erql;	/* aka erq on v32 */
	void __iomem *eeih;
	void __iomem *eeil;	/* aka eei on v32 */
	void __iomem *seei;
	void __iomem *ceei;
	void __iomem *serq;
	void __iomem *cerq;
	void __iomem *cint;
	void __iomem *cerr;
	void __iomem *ssrt;
	void __iomem *cdne;
	void __iomem *inth;
	void __iomem *intl;
	void __iomem *errh;
	void __iomem *errl;
	struct fsl_edma_hw_tcd __iomem *tcd;
};

struct fsl_edma_sw_tcd {
	dma_addr_t			ptcd;
	struct fsl_edma_hw_tcd		*vtcd;
};

struct fsl_edma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	enum fsl_edma_pm_state		pm_state;
	bool				idle;
	u32				slave_id;
	struct fsl_edma_engine		*edma;
	struct fsl_edma_desc		*edesc;
	struct dma_slave_config		cfg;
	u32				attr;
	struct dma_pool			*tcd_pool;
	dma_addr_t			dma_dev_addr;
	u32				dma_dev_size;
	enum dma_data_direction		dma_dir;
	char				chan_name[16];
};

struct fsl_edma_desc {
	struct virt_dma_desc		vdesc;
	struct fsl_edma_chan		*echan;
	bool				iscyclic;
	enum dma_transfer_direction	dirn;
	unsigned int			n_tcds;
	struct fsl_edma_sw_tcd		tcd[];
};

enum edma_version {
	v1, /* 32ch, Vybrid, mpc57x, etc */
	v2, /* 64ch Coldfire */
	v3, /* 32ch, i.mx7ulp */
};

struct fsl_edma_drvdata {
	enum edma_version	version;
	u32			dmamuxs;
	bool			has_dmaclk;
	bool			mux_swap;
	int			(*setup_irq)(struct platform_device *pdev,
					     struct fsl_edma_engine *fsl_edma);
};
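
/*
 * Each SoC-specific front end is expected to provide one of these, roughly
 * along the lines of (illustrative sketch; the symbol names are examples
 * only, not defined in this header):
 *
 *	static const struct fsl_edma_drvdata vf610_data = {
 *		.version	= v1,
 *		.dmamuxs	= DMAMUX_NR,
 *		.setup_irq	= fsl_edma_irq_init,
 *	};
 */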

struct fsl_edma_engine {
	struct dma_device	dma_dev;
	void __iomem		*membase;
	void __iomem		*muxbase[DMAMUX_NR];
	struct clk		*muxclk[DMAMUX_NR];
	struct clk		*dmaclk;
	struct mutex		fsl_edma_mutex;
	const struct fsl_edma_drvdata *drvdata;
	u32			n_chans;
	int			txirq;
	int			errirq;
	bool			big_endian;
	struct edma_regs	regs;
	struct fsl_edma_chan	chans[];
};

/*
 * R/W helpers for big- or little-endian registers:
 * the eDMA controller's endianness is independent of the CPU core's
 * endianness. For the big-endian IP module, the offset of an 8-bit or
 * 16-bit register within its 32-bit word must also be swapped relative
 * to the little-endian IP (hence the address XOR in edma_writeb() and
 * edma_writew() below).
 */
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{
	if (edma->big_endian)
		return ioread32be(addr);
	else
		return ioread32(addr);
}

static inline void edma_writeb(struct fsl_edma_engine *edma,
			       u8 val, void __iomem *addr)
{
	/* swap the reg offset for these in big-endian mode */
	if (edma->big_endian)
		iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
	else
		iowrite8(val, addr);
}

static inline void edma_writew(struct fsl_edma_engine *edma,
			       u16 val, void __iomem *addr)
{
	/* swap the reg offset for these in big-endian mode */
	if (edma->big_endian)
		iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
	else
		iowrite16(val, addr);
}

static inline void edma_writel(struct fsl_edma_engine *edma,
			       u32 val, void __iomem *addr)
{
	if (edma->big_endian)
		iowrite32be(val, addr);
	else
		iowrite32(val, addr);
}
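
/*
 * Illustrative use of the helpers above (a sketch of typical usage, with
 * fsl_chan, ch and regs as example local variables): enabling the error
 * interrupt for a channel goes through the byte-wide SEEI register, so the
 * offset swap in edma_writeb() applies on big-endian controllers:
 *
 *	edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
 */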

static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_edma_chan, vchan.chan);
}

static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_edma_desc, vdesc);
}

void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
		       unsigned int slot, bool enable);
void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
int fsl_edma_terminate_all(struct dma_chan *chan);
int fsl_edma_pause(struct dma_chan *chan);
int fsl_edma_resume(struct dma_chan *chan);
int fsl_edma_slave_config(struct dma_chan *chan,
			  struct dma_slave_config *cfg);
enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
				   dma_cookie_t cookie,
				   struct dma_tx_state *txstate);
struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags);
struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context);
void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
void fsl_edma_issue_pending(struct dma_chan *chan);
int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
void fsl_edma_free_chan_resources(struct dma_chan *chan);
void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
void fsl_edma_setup_regs(struct fsl_edma_engine *edma);

#endif /* _FSL_EDMA_COMMON_H_ */