// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic JZ4780 DMA controller
 *
 * Copyright (c) 2015 Imagination Technologies
 * Author: Alex Smith <alex@alex-smith.me.uk>
 */

#include <linux/clk.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Global registers. */
#define JZ_DMA_REG_DMAC		0x00
#define JZ_DMA_REG_DIRQP	0x04
#define JZ_DMA_REG_DDR		0x08
#define JZ_DMA_REG_DDRS		0x0c
#define JZ_DMA_REG_DCKE		0x10
#define JZ_DMA_REG_DCKES	0x14
#define JZ_DMA_REG_DCKEC	0x18
#define JZ_DMA_REG_DMACP	0x1c
#define JZ_DMA_REG_DSIRQP	0x20
#define JZ_DMA_REG_DSIRQM	0x24
#define JZ_DMA_REG_DCIRQP	0x28
#define JZ_DMA_REG_DCIRQM	0x2c

/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n)	((n) * 0x20)
#define JZ_DMA_REG_DSA		0x00
#define JZ_DMA_REG_DTA		0x04
#define JZ_DMA_REG_DTC		0x08
#define JZ_DMA_REG_DRT		0x0c
#define JZ_DMA_REG_DCS		0x10
#define JZ_DMA_REG_DCM		0x14
#define JZ_DMA_REG_DDA		0x18
#define JZ_DMA_REG_DSD		0x1c

#define JZ_DMA_DMAC_DMAE	BIT(0)
#define JZ_DMA_DMAC_AR		BIT(2)
#define JZ_DMA_DMAC_HLT		BIT(3)
#define JZ_DMA_DMAC_FAIC	BIT(27)
#define JZ_DMA_DMAC_FMSC	BIT(31)

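/* DRT request type for memory-to-memory ("auto" request) transfers. */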
#define JZ_DMA_DRT_AUTO		0x8

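/*
 * DCS (channel control/status) bits as used below: CTE enables the
 * channel, TT flags transfer termination, HLT and AR flag a channel
 * halt or address error, and DES8 selects 8-word descriptors (this
 * driver writes DCS = 0 to use 4-word descriptors).
 */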
#define JZ_DMA_DCS_CTE		BIT(0)
#define JZ_DMA_DCS_HLT		BIT(2)
#define JZ_DMA_DCS_TT		BIT(3)
#define JZ_DMA_DCS_AR		BIT(4)
#define JZ_DMA_DCS_DES8		BIT(30)

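/*
 * DCM (channel command) bits as used below: LINK chains to the next
 * hardware descriptor, TIE enables the transfer interrupt, TSZ holds
 * the transfer size, SP/DP the source/destination port widths, and
 * SAI/DAI enable source/destination address increment.
 */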
#define JZ_DMA_DCM_LINK		BIT(0)
#define JZ_DMA_DCM_TIE		BIT(1)
#define JZ_DMA_DCM_STDE		BIT(2)
#define JZ_DMA_DCM_TSZ_SHIFT	8
#define JZ_DMA_DCM_TSZ_MASK	(0x7 << JZ_DMA_DCM_TSZ_SHIFT)
#define JZ_DMA_DCM_DP_SHIFT	12
#define JZ_DMA_DCM_SP_SHIFT	14
#define JZ_DMA_DCM_DAI		BIT(22)
#define JZ_DMA_DCM_SAI		BIT(23)

#define JZ_DMA_SIZE_4_BYTE	0x0
#define JZ_DMA_SIZE_1_BYTE	0x1
#define JZ_DMA_SIZE_2_BYTE	0x2
#define JZ_DMA_SIZE_16_BYTE	0x3
#define JZ_DMA_SIZE_32_BYTE	0x4
#define JZ_DMA_SIZE_64_BYTE	0x5
#define JZ_DMA_SIZE_128_BYTE	0x6

#define JZ_DMA_WIDTH_32_BIT	0x0
#define JZ_DMA_WIDTH_8_BIT	0x1
#define JZ_DMA_WIDTH_16_BIT	0x2

#define JZ_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)	 | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

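/* Offset of the global control registers from the channel register base. */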
#define JZ4780_DMA_CTRL_OFFSET	0x1000

/* macros for use with jz4780_dma_soc_data.flags */
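/*
 * PER_CHAN_PM gates each channel's clock through the DCKE registers,
 * NO_DCKES_DCKEC marks SoCs lacking the separate set/clear variants of
 * DCKE, and BREAK_LINKS prevents hardware descriptors from being linked
 * (see jz4780_dma_prep_slave_sg()).
 */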
#define JZ_SOC_DATA_ALLOW_LEGACY_DT	BIT(0)
#define JZ_SOC_DATA_PROGRAMMABLE_DMA	BIT(1)
#define JZ_SOC_DATA_PER_CHAN_PM		BIT(2)
#define JZ_SOC_DATA_NO_DCKES_DCKEC	BIT(3)
#define JZ_SOC_DATA_BREAK_LINKS		BIT(4)

/**
 * struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
 * @dcm: value for the DCM (channel command) register
 * @dsa: source address
 * @dta: target address
 * @dtc: transfer count (number of blocks of the transfer size specified in DCM
 * to transfer) in the low 24 bits, offset of the next descriptor from the
 * descriptor base address in the upper 8 bits.
 */
struct jz4780_dma_hwdesc {
	uint32_t dcm;
	uint32_t dsa;
	uint32_t dta;
	uint32_t dtc;
};

/* Size of allocations for hardware descriptor blocks. */
#define JZ_DMA_DESC_BLOCK_SIZE	PAGE_SIZE
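/* Maximum number of hardware descriptors (sg entries or periods) per transfer. */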
#define JZ_DMA_MAX_DESC \
	(JZ_DMA_DESC_BLOCK_SIZE / sizeof(struct jz4780_dma_hwdesc))

struct jz4780_dma_desc {
	struct virt_dma_desc vdesc;

	struct jz4780_dma_hwdesc *desc;
	dma_addr_t desc_phys;
	unsigned int count;
	enum dma_transaction_type type;
	uint32_t status;
};

struct jz4780_dma_chan {
	struct virt_dma_chan vchan;
	unsigned int id;
	struct dma_pool *desc_pool;

	uint32_t transfer_type;
	uint32_t transfer_shift;
	struct dma_slave_config config;

	struct jz4780_dma_desc *desc;
	unsigned int curr_hwdesc;
};

struct jz4780_dma_soc_data {
	unsigned int nb_channels;
	unsigned int transfer_ord_max;
	unsigned long flags;
};

struct jz4780_dma_dev {
	struct dma_device dma_device;
	void __iomem *chn_base;
	void __iomem *ctrl_base;
	struct clk *clk;
	unsigned int irq;
	const struct jz4780_dma_soc_data *soc_data;

	uint32_t chan_reserved;
	struct jz4780_dma_chan chan[];
};

struct jz4780_dma_filter_data {
	uint32_t transfer_type;
	int channel;
};

static inline struct jz4780_dma_chan *to_jz4780_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct jz4780_dma_chan, vchan.chan);
}

static inline struct jz4780_dma_desc *to_jz4780_dma_desc(
	struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct jz4780_dma_desc, vdesc);
}

static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
	struct jz4780_dma_chan *jzchan)
{
	return container_of(jzchan->vchan.chan.device, struct jz4780_dma_dev,
			    dma_device);
}

static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg)
{
	return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
	unsigned int chn, unsigned int reg, uint32_t val)
{
	writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}

static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
	unsigned int reg)
{
	return readl(jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
	unsigned int reg, uint32_t val)
{
	writel(val, jzdma->ctrl_base + reg);
}

static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
		unsigned int reg;

		if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
			reg = JZ_DMA_REG_DCKE;
		else
			reg = JZ_DMA_REG_DCKES;

		jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
	}
}

static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
	unsigned int chn)
{
	if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
	    !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}

static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
	struct jz4780_dma_chan *jzchan, unsigned int count,
	enum dma_transaction_type type)
{
	struct jz4780_dma_desc *desc;

	if (count > JZ_DMA_MAX_DESC)
		return NULL;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT,
				    &desc->desc_phys);
	if (!desc->desc) {
		kfree(desc);
		return NULL;
	}

	desc->count = count;
	desc->type = type;
	return desc;
}

static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
{
	struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc);
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(vdesc->tx.chan);

	dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys);
	kfree(desc);
}

static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
	unsigned long val, uint32_t *shift)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
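	/*
	 * ffs() returns the index of the lowest set bit, so ord is the
	 * log2 of the largest power-of-2 size that val is aligned to.
	 */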
	int ord = ffs(val) - 1;

	/*
	 * 8-byte transfer sizes are unsupported, so fall back on 4. If the
	 * size is larger than the maximum, just limit it. It is perfectly
	 * safe to fall back in this way since we won't exceed the maximum
	 * burst size supported by the device; the only effect is reduced
	 * efficiency. This is better than refusing to perform the request
	 * at all.
	 */
	if (ord == 3)
		ord = 2;
	else if (ord > jzdma->soc_data->transfer_ord_max)
		ord = jzdma->soc_data->transfer_ord_max;

	*shift = ord;

	switch (ord) {
	case 0:
		return JZ_DMA_SIZE_1_BYTE;
	case 1:
		return JZ_DMA_SIZE_2_BYTE;
	case 2:
		return JZ_DMA_SIZE_4_BYTE;
	case 4:
		return JZ_DMA_SIZE_16_BYTE;
	case 5:
		return JZ_DMA_SIZE_32_BYTE;
	case 6:
		return JZ_DMA_SIZE_64_BYTE;
	default:
		return JZ_DMA_SIZE_128_BYTE;
	}
}

static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len,
	enum dma_transfer_direction direction)
{
	struct dma_slave_config *config = &jzchan->config;
	uint32_t width, maxburst, tsz;

	if (direction == DMA_MEM_TO_DEV) {
		desc->dcm = JZ_DMA_DCM_SAI;
		desc->dsa = addr;
		desc->dta = config->dst_addr;

		width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else {
		desc->dcm = JZ_DMA_DCM_DAI;
		desc->dsa = config->src_addr;
		desc->dta = addr;

		width = config->src_addr_width;
		maxburst = config->src_maxburst;
	}

	/*
	 * This calculates the maximum transfer size that can be used with the
	 * given address, length, width and maximum burst size. The address
	 * must be aligned to the transfer size, the total length must be
	 * divisible by the transfer size, and we must not use more than the
	 * maximum burst specified by the user.
	 */
	tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
				       &jzchan->transfer_shift);

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		width = JZ_DMA_WIDTH_32_BIT;
		break;
	default:
		return -EINVAL;
	}

	desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT;
	desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT;

	desc->dtc = len >> jzchan->transfer_shift;
	return 0;
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_desc *desc;
	unsigned int i;
	int err;

	desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
	if (!desc)
		return NULL;

	for (i = 0; i < sg_len; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i],
					      sg_dma_address(&sgl[i]),
					      sg_dma_len(&sgl[i]),
					      direction);
		if (err < 0) {
			jz4780_dma_desc_free(&jzchan->desc->vdesc);
			return NULL;
		}

		desc->desc[i].dcm |= JZ_DMA_DCM_TIE;

		if (i != (sg_len - 1) &&
		    !(jzdma->soc_data->flags & JZ_SOC_DATA_BREAK_LINKS)) {
			/* Automatically proceed to the next descriptor. */
			desc->desc[i].dcm |= JZ_DMA_DCM_LINK;

			/*
			 * The upper 8 bits of the DTC field in the descriptor
			 * must be set to (offset from descriptor base of next
			 * descriptor >> 4).
			 */
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	unsigned int periods, i;
	int err;

	if (buf_len % period_len)
		return NULL;

	periods = buf_len / period_len;

	desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
	if (!desc)
		return NULL;

	for (i = 0; i < periods; i++) {
		err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr,
					      period_len, direction);
		if (err < 0) {
			jz4780_dma_desc_free(&jzchan->desc->vdesc);
			return NULL;
		}

		buf_addr += period_len;

		/*
		 * Set the link bit to indicate that the controller should
		 * automatically proceed to the next descriptor. In
		 * jz4780_dma_begin(), this will be cleared if we need to issue
		 * an interrupt after each period.
		 */
		desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK;

		/*
		 * The upper 8 bits of the DTC field in the descriptor must be
		 * set to (offset from descriptor base of next descriptor >> 4).
		 * If this is the last descriptor, link it back to the first,
		 * i.e. leave offset set to 0, otherwise point to the next one.
		 */
		if (i != (periods - 1)) {
			desc->desc[i].dtc |=
				(((i + 1) * sizeof(*desc->desc)) >> 4) << 24;
		}
	}

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_desc *desc;
	uint32_t tsz;

	desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
	if (!desc)
		return NULL;

	tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
				       &jzchan->transfer_shift);

	jzchan->transfer_type = JZ_DMA_DRT_AUTO;

	desc->desc[0].dsa = src;
	desc->desc[0].dta = dest;
	desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
			    tsz << JZ_DMA_DCM_TSZ_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
			    JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_DP_SHIFT;
	desc->desc[0].dtc = len >> jzchan->transfer_shift;

	return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags);
}

static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct virt_dma_desc *vdesc;
	unsigned int i;
	dma_addr_t desc_phys;

	if (!jzchan->desc) {
		vdesc = vchan_next_desc(&jzchan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		jzchan->desc = to_jz4780_dma_desc(vdesc);
		jzchan->curr_hwdesc = 0;

		if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) {
			/*
			 * The DMA controller doesn't support triggering an
			 * interrupt after processing each descriptor, only
			 * after processing an entire terminated list of
			 * descriptors. For a cyclic DMA setup the list of
			 * descriptors is not terminated so we can never get an
			 * interrupt.
			 *
			 * If the user requested a callback for a cyclic DMA
			 * setup then we workaround this hardware limitation
			 * here by degrading to a set of unlinked descriptors
			 * which we will submit in sequence in response to the
			 * completion of processing the previous descriptor.
			 */
			for (i = 0; i < jzchan->desc->count; i++)
				jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK;
		}
	} else {
		/*
		 * There is an existing transfer, therefore this must be one
		 * for which we unlinked the descriptors above. Advance to the
		 * next one in the list.
		 */
		jzchan->curr_hwdesc =
			(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
	}

	/* Enable the channel's clock. */
	jz4780_dma_chan_enable(jzdma, jzchan->id);

	/* Use 4-word descriptors. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);

	/* Set transfer type. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
			      jzchan->transfer_type);

	/*
	 * Set the transfer count. This is redundant for a descriptor-driven
	 * transfer. However, there can be a delay between the transfer start
	 * time and when DTCn reg contains the new transfer count. Setting
	 * it explicitly ensures residue is computed correctly at all times.
	 */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
			      jzchan->desc->desc[jzchan->curr_hwdesc].dtc);

	/* Write descriptor address and initiate descriptor fetch. */
	desc_phys = jzchan->desc->desc_phys +
		    (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));

	/* Enable the channel. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
			      JZ_DMA_DCS_CTE);
}

static void jz4780_dma_issue_pending(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc)
		jz4780_dma_begin(jzchan);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
}

static int jz4780_dma_terminate_all(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&jzchan->vchan.lock, flags);

	/* Clear the DMA status and stop the transfer. */
	jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
	if (jzchan->desc) {
		vchan_terminate_vdesc(&jzchan->desc->vdesc);
		jzchan->desc = NULL;
	}

	jz4780_dma_chan_disable(jzdma, jzchan->id);

	vchan_get_all_descriptors(&jzchan->vchan, &head);

	spin_unlock_irqrestore(&jzchan->vchan.lock, flags);

	vchan_dma_desc_free_list(&jzchan->vchan, &head);
	return 0;
}

static void jz4780_dma_synchronize(struct dma_chan *chan)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);

	vchan_synchronize(&jzchan->vchan);
	jz4780_dma_chan_disable(jzdma, jzchan->id);
}

static int jz4780_dma_config(struct dma_chan *chan,
	struct dma_slave_config *config)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);

	if ((config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
	   || (config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES))
		return -EINVAL;

	/* Copy the rest of the slave configuration; it is used later. */
	memcpy(&jzchan->config, config, sizeof(jzchan->config));

	return 0;
}

static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
	struct jz4780_dma_desc *desc, unsigned int next_sg)
{
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	unsigned int count = 0;
	unsigned int i;

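	/*
	 * The low 24 bits of each DTC field hold the remaining block count;
	 * sum the counts of the hardware descriptors not yet processed.
	 */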
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) for (i = next_sg; i < desc->count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) count += desc->desc[i].dtc & GENMASK(23, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (next_sg != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) count += jz4780_dma_chn_readl(jzdma, jzchan->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) JZ_DMA_REG_DTC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) return count << jzchan->transfer_shift;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) dma_cookie_t cookie, struct dma_tx_state *txstate)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) struct virt_dma_desc *vdesc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) enum dma_status status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) unsigned long residue = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) spin_lock_irqsave(&jzchan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) status = dma_cookie_status(chan, cookie, txstate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) if ((status == DMA_COMPLETE) || (txstate == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) goto out_unlock_irqrestore;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) vdesc = vchan_find_desc(&jzchan->vchan, cookie);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) if (vdesc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) /* On the issued list, so hasn't been processed yet */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) residue = jz4780_dma_desc_residue(jzchan,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) to_jz4780_dma_desc(vdesc), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) } else if (cookie == jzchan->desc->vdesc.tx.cookie) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) jzchan->curr_hwdesc + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) dma_set_residue(txstate, residue);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) status = DMA_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) out_unlock_irqrestore:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) return status;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) struct jz4780_dma_chan *jzchan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) const unsigned int soc_flags = jzdma->soc_data->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) struct jz4780_dma_desc *desc = jzchan->desc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) uint32_t dcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) bool ack = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) spin_lock(&jzchan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) if (dcs & JZ_DMA_DCS_AR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) dev_warn(&jzchan->vchan.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) "address error (DCS=0x%x)\n", dcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) if (dcs & JZ_DMA_DCS_HLT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) dev_warn(&jzchan->vchan.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) "channel halt (DCS=0x%x)\n", dcs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) if (jzchan->desc) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) jzchan->desc->status = dcs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) if (jzchan->desc->type == DMA_CYCLIC) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) vchan_cyclic_callback(&jzchan->desc->vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) jz4780_dma_begin(jzchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) } else if (dcs & JZ_DMA_DCS_TT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) if (!(soc_flags & JZ_SOC_DATA_BREAK_LINKS) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) (jzchan->curr_hwdesc + 1 == desc->count)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) vchan_cookie_complete(&desc->vdesc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) jzchan->desc = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) jz4780_dma_begin(jzchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) /* False positive - continue the transfer */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) ack = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) jz4780_dma_chn_writel(jzdma, jzchan->id,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) JZ_DMA_REG_DCS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) JZ_DMA_DCS_CTE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) dev_err(&jzchan->vchan.chan.dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) "channel IRQ with no active transfer\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) spin_unlock(&jzchan->vchan.lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) return ack;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) struct jz4780_dma_dev *jzdma = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) unsigned int nb_channels = jzdma->soc_data->nb_channels;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) unsigned long pending;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) uint32_t dmac;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) for_each_set_bit(i, &pending, nb_channels) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) pending &= ~BIT(i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) /* Clear halt and address error status of all channels. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) /* Clear interrupt pending status. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) return IRQ_HANDLED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) static int jz4780_dma_alloc_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) jzchan->desc_pool = dma_pool_create(dev_name(&chan->dev->device),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) chan->device->dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) JZ_DMA_DESC_BLOCK_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) PAGE_SIZE, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) if (!jzchan->desc_pool) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) dev_err(&chan->dev->device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) "failed to allocate descriptor pool\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) static void jz4780_dma_free_chan_resources(struct dma_chan *chan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) vchan_free_chan_resources(&jzchan->vchan);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) dma_pool_destroy(jzchan->desc_pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) jzchan->desc_pool = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776)
static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
	struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
	struct jz4780_dma_filter_data *data = param;

	if (data->channel > -1) {
		if (data->channel != jzchan->id)
			return false;
	} else if (jzdma->chan_reserved & BIT(jzchan->id)) {
		return false;
	}

	jzchan->transfer_type = data->transfer_type;

	return true;
}

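/*
 * Translate a devicetree DMA specifier into a channel. The specifier
 * carries two cells: the hardware request (transfer) type, and the
 * requested channel number, where 0xffffffff (i.e. -1) selects any
 * unreserved channel via the filter function above. A hypothetical
 * client node might contain:
 *
 *	dmas = <&dma 0x12 0xffffffff>, <&dma 0x13 0xffffffff>;
 *	dma-names = "tx", "rx";
 */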
static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct jz4780_dma_dev *jzdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
	struct jz4780_dma_filter_data data;

	if (dma_spec->args_count != 2)
		return NULL;

	data.transfer_type = dma_spec->args[0];
	data.channel = dma_spec->args[1];

	if (data.channel > -1) {
		if (data.channel >= jzdma->soc_data->nb_channels) {
			dev_err(jzdma->dma_device.dev,
				"device requested non-existent channel %u\n",
				data.channel);
			return NULL;
		}

		/* Can only select a channel marked as reserved. */
		if (!(jzdma->chan_reserved & BIT(data.channel))) {
			dev_err(jzdma->dma_device.dev,
				"device requested unreserved channel %u\n",
				data.channel);
			return NULL;
		}

		jzdma->chan[data.channel].transfer_type = data.transfer_type;

		return dma_get_slave_channel(
			&jzdma->chan[data.channel].vchan.chan);
	} else {
		return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
					     ofdma->of_node);
	}
}

static int jz4780_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct jz4780_dma_soc_data *soc_data;
	struct jz4780_dma_dev *jzdma;
	struct jz4780_dma_chan *jzchan;
	struct dma_device *dd;
	struct resource *res;
	int i, ret;

	if (!dev->of_node) {
		dev_err(dev, "This driver must be probed from devicetree\n");
		return -EINVAL;
	}

	soc_data = device_get_match_data(dev);
	if (!soc_data)
		return -EINVAL;

	jzdma = devm_kzalloc(dev, struct_size(jzdma, chan,
			     soc_data->nb_channels), GFP_KERNEL);
	if (!jzdma)
		return -ENOMEM;

	jzdma->soc_data = soc_data;
	platform_set_drvdata(pdev, jzdma);

	jzdma->chn_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(jzdma->chn_base))
		return PTR_ERR(jzdma->chn_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res) {
		jzdma->ctrl_base = devm_ioremap_resource(dev, res);
		if (IS_ERR(jzdma->ctrl_base))
			return PTR_ERR(jzdma->ctrl_base);
	} else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
		/*
		 * On JZ4780, if the second memory resource was not supplied,
		 * assume we're using an old devicetree, and calculate the
		 * offset to the control registers.
		 */
		jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
	} else {
		dev_err(dev, "failed to get I/O memory\n");
		return -EINVAL;
	}

	jzdma->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(jzdma->clk)) {
		dev_err(dev, "failed to get clock\n");
		return PTR_ERR(jzdma->clk);
	}

	ret = clk_prepare_enable(jzdma->clk);
	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		return ret;
	}

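	/*
	 * Reserved channels can only be picked up by clients that select
	 * them explicitly in their DMA specifier; jz4780_dma_filter_fn()
	 * skips them. The value is a bitmask with one bit per channel, so
	 * a hypothetical devicetree snippet reserving channels 0 and 1
	 * would be:
	 *
	 *	ingenic,reserved-channels = <0x3>;
	 */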
	/* Property is optional; if it doesn't exist, the value will remain 0. */
	of_property_read_u32_index(dev->of_node, "ingenic,reserved-channels",
				   0, &jzdma->chan_reserved);

	dd = &jzdma->dma_device;

	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);

	dd->dev = dev;
	dd->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dd->device_alloc_chan_resources = jz4780_dma_alloc_chan_resources;
	dd->device_free_chan_resources = jz4780_dma_free_chan_resources;
	dd->device_prep_slave_sg = jz4780_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = jz4780_dma_prep_dma_cyclic;
	dd->device_prep_dma_memcpy = jz4780_dma_prep_dma_memcpy;
	dd->device_config = jz4780_dma_config;
	dd->device_terminate_all = jz4780_dma_terminate_all;
	dd->device_synchronize = jz4780_dma_synchronize;
	dd->device_tx_status = jz4780_dma_tx_status;
	dd->device_issue_pending = jz4780_dma_issue_pending;
	dd->src_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->dst_addr_widths = JZ_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * Enable DMA controller, mark all channels as not programmable.
	 * Also set the FMSC bit - it increases MSC performance, so it makes
	 * little sense not to enable it.
	 */
	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
			       JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);

	if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
		jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);

	INIT_LIST_HEAD(&dd->channels);

	for (i = 0; i < soc_data->nb_channels; i++) {
		jzchan = &jzdma->chan[i];
		jzchan->id = i;

		vchan_init(&jzchan->vchan, dd);
		jzchan->vchan.desc_free = jz4780_dma_desc_free;
	}

	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_disable_clk;

	jzdma->irq = ret;

	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
			  jzdma);
	if (ret) {
		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
		goto err_disable_clk;
	}

	ret = dmaenginem_async_device_register(dd);
	if (ret) {
		dev_err(dev, "failed to register device\n");
		goto err_free_irq;
	}

	/* Register with OF DMA helpers. */
	ret = of_dma_controller_register(dev->of_node, jz4780_of_dma_xlate,
					 jzdma);
	if (ret) {
		dev_err(dev, "failed to register OF DMA controller\n");
		goto err_free_irq;
	}

	dev_info(dev, "JZ4780 DMA controller initialised\n");
	return 0;

err_free_irq:
	free_irq(jzdma->irq, jzdma);

err_disable_clk:
	clk_disable_unprepare(jzdma->clk);
	return ret;
}

static int jz4780_dma_remove(struct platform_device *pdev)
{
	struct jz4780_dma_dev *jzdma = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	clk_disable_unprepare(jzdma->clk);
	free_irq(jzdma->irq, jzdma);

	for (i = 0; i < jzdma->soc_data->nb_channels; i++)
		tasklet_kill(&jzdma->chan[i].vchan.task);

	return 0;
}

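/*
 * Per-SoC parameters: the number of physical channels, the maximum
 * transfer size order (individual transfers are capped at
 * 1 << transfer_ord_max bytes), and quirk flags describing hardware
 * differences between the supported SoCs.
 */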
static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 5,
	.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
		 JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
	.nb_channels = 6,
	.transfer_ord_max = 6,
	.flags = JZ_SOC_DATA_PER_CHAN_PM,
};

static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1000_dma_soc_data = {
	.nb_channels = 8,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct jz4780_dma_soc_data x1830_dma_soc_data = {
	.nb_channels = 32,
	.transfer_ord_max = 7,
	.flags = JZ_SOC_DATA_PROGRAMMABLE_DMA,
};

static const struct of_device_id jz4780_dma_dt_match[] = {
	{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
	{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
	{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
	{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
	{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
	{ .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data },
	{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);

static struct platform_driver jz4780_dma_driver = {
	.probe		= jz4780_dma_probe,
	.remove		= jz4780_dma_remove,
	.driver	= {
		.name	= "jz4780-dma",
		.of_match_table	= of_match_ptr(jz4780_dma_dt_match),
	},
};

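/*
 * Registered at subsys_initcall time rather than with module_init() so
 * that, in built-in configurations, the DMA controller comes up before
 * the client drivers that depend on it begin probing.
 */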
static int __init jz4780_dma_init(void)
{
	return platform_driver_register(&jz4780_dma_driver);
}
subsys_initcall(jz4780_dma_init);

static void __exit jz4780_dma_exit(void)
{
	platform_driver_unregister(&jz4780_dma_driver);
}
module_exit(jz4780_dma_exit);

MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
MODULE_DESCRIPTION("Ingenic JZ4780 DMA controller driver");
MODULE_LICENSE("GPL");