// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Emilio López
 * Emilio López <emilio@elopez.com.ar>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

/** Common macros to normal and dedicated DMA registers **/

#define SUN4I_DMA_CFG_LOADING			BIT(31)
#define SUN4I_DMA_CFG_DST_DATA_WIDTH(width)	((width) << 25)
#define SUN4I_DMA_CFG_DST_BURST_LENGTH(len)	((len) << 23)
#define SUN4I_DMA_CFG_DST_ADDR_MODE(mode)	((mode) << 21)
#define SUN4I_DMA_CFG_DST_DRQ_TYPE(type)	((type) << 16)
#define SUN4I_DMA_CFG_SRC_DATA_WIDTH(width)	((width) << 9)
#define SUN4I_DMA_CFG_SRC_BURST_LENGTH(len)	((len) << 7)
#define SUN4I_DMA_CFG_SRC_ADDR_MODE(mode)	((mode) << 5)
#define SUN4I_DMA_CFG_SRC_DRQ_TYPE(type)	(type)

/** Normal DMA register values **/

/* Normal DMA source/destination data request type values */
#define SUN4I_NDMA_DRQ_TYPE_SDRAM		0x16
#define SUN4I_NDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Normal DMA register layout **/

/* Normal DMA source/destination address mode values */
#define SUN4I_NDMA_ADDR_MODE_LINEAR		0
#define SUN4I_NDMA_ADDR_MODE_IO			1

/* Normal DMA configuration register layout */
#define SUN4I_NDMA_CFG_CONT_MODE		BIT(30)
#define SUN4I_NDMA_CFG_WAIT_STATE(n)		((n) << 27)
#define SUN4I_NDMA_CFG_DST_NON_SECURE		BIT(22)
#define SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_NDMA_CFG_SRC_NON_SECURE		BIT(6)

/** Dedicated DMA register values **/

/* Dedicated DMA source/destination address mode values */
#define SUN4I_DDMA_ADDR_MODE_LINEAR		0
#define SUN4I_DDMA_ADDR_MODE_IO			1
#define SUN4I_DDMA_ADDR_MODE_HORIZONTAL_PAGE	2
#define SUN4I_DDMA_ADDR_MODE_VERTICAL_PAGE	3

/* Dedicated DMA source/destination data request type values */
#define SUN4I_DDMA_DRQ_TYPE_SDRAM		0x1
#define SUN4I_DDMA_DRQ_TYPE_LIMIT		(0x1F + 1)

/** Dedicated DMA register layout **/

/* Dedicated DMA configuration register layout */
#define SUN4I_DDMA_CFG_BUSY			BIT(30)
#define SUN4I_DDMA_CFG_CONT_MODE		BIT(29)
#define SUN4I_DDMA_CFG_DST_NON_SECURE		BIT(28)
#define SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN	BIT(15)
#define SUN4I_DDMA_CFG_SRC_NON_SECURE		BIT(12)

/* Dedicated DMA parameter register layout */
#define SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(n)	(((n) - 1) << 24)
#define SUN4I_DDMA_PARA_DST_WAIT_CYCLES(n)	(((n) - 1) << 16)
#define SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(n)	(((n) - 1) << 8)
#define SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(n)	(((n) - 1) << 0)

/** DMA register offsets **/

/* General register offsets */
#define SUN4I_DMA_IRQ_ENABLE_REG		0x0
#define SUN4I_DMA_IRQ_PENDING_STATUS_REG	0x4

/* Normal DMA register offsets */
#define SUN4I_NDMA_CHANNEL_REG_BASE(n)		(0x100 + (n) * 0x20)
#define SUN4I_NDMA_CFG_REG			0x0
#define SUN4I_NDMA_SRC_ADDR_REG			0x4
#define SUN4I_NDMA_DST_ADDR_REG			0x8
#define SUN4I_NDMA_BYTE_COUNT_REG		0xC

/* Dedicated DMA register offsets */
#define SUN4I_DDMA_CHANNEL_REG_BASE(n)		(0x300 + (n) * 0x20)
#define SUN4I_DDMA_CFG_REG			0x0
#define SUN4I_DDMA_SRC_ADDR_REG			0x4
#define SUN4I_DDMA_DST_ADDR_REG			0x8
#define SUN4I_DDMA_BYTE_COUNT_REG		0xC
#define SUN4I_DDMA_PARA_REG			0x18

/** DMA Driver **/

/*
 * Normal DMA has 8 channels, and Dedicated DMA has another 8, so
 * that's 16 channels. As for endpoints, there are 29 and 21
 * respectively. Given that the Normal DMA endpoints (other than
 * SDRAM) can be used as tx/rx, we need 78 vchans in total.
 */
#define SUN4I_NDMA_NR_MAX_CHANNELS	8
#define SUN4I_DDMA_NR_MAX_CHANNELS	8
#define SUN4I_DMA_NR_MAX_CHANNELS \
	(SUN4I_NDMA_NR_MAX_CHANNELS + SUN4I_DDMA_NR_MAX_CHANNELS)
#define SUN4I_NDMA_NR_MAX_VCHANS	(29 * 2 - 1)
#define SUN4I_DDMA_NR_MAX_VCHANS	21
#define SUN4I_DMA_NR_MAX_VCHANS \
	(SUN4I_NDMA_NR_MAX_VCHANS + SUN4I_DDMA_NR_MAX_VCHANS)
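
/*
 * The arithmetic behind the vchan counts, spelled out: of the 29 NDMA
 * endpoints, all but SDRAM count twice (once as tx, once as rx), so
 * 28 * 2 + 1 = 57 = 29 * 2 - 1 NDMA vchans; adding the 21 DDMA
 * endpoints gives the 78 vchans mentioned above.
 */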

/*
 * This set of SUN4I_DDMA timing parameters was found experimentally
 * while working with the SPI driver, and seems to make it behave
 * correctly.
 */
#define SUN4I_DDMA_MAGIC_SPI_PARAMETERS \
	(SUN4I_DDMA_PARA_DST_DATA_BLK_SIZE(1) | \
	 SUN4I_DDMA_PARA_SRC_DATA_BLK_SIZE(1) | \
	 SUN4I_DDMA_PARA_DST_WAIT_CYCLES(2) | \
	 SUN4I_DDMA_PARA_SRC_WAIT_CYCLES(2))
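
/*
 * For reference, expanding the macros above: a block size of 1 encodes
 * as (1 - 1) = 0 in bits 24 and 8, and 2 wait cycles encode as
 * (2 - 1) = 1 in bits 16 and 0, so the magic value works out to
 * 0x00010001.
 */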

struct sun4i_dma_pchan {
	/* Register base of channel */
	void __iomem			*base;
	/* vchan currently being serviced */
	struct sun4i_dma_vchan		*vchan;
	/* Is this a dedicated pchan? */
	int				is_dedicated;
};

struct sun4i_dma_vchan {
	struct virt_dma_chan		vc;
	struct dma_slave_config		cfg;
	struct sun4i_dma_pchan		*pchan;
	struct sun4i_dma_promise	*processing;
	struct sun4i_dma_contract	*contract;
	u8				endpoint;
	int				is_dedicated;
};

struct sun4i_dma_promise {
	u32				cfg;
	u32				para;
	dma_addr_t			src;
	dma_addr_t			dst;
	size_t				len;
	struct list_head		list;
};

/* A contract is a set of promises */
struct sun4i_dma_contract {
	struct virt_dma_desc		vd;
	struct list_head		demands;
	struct list_head		completed_demands;
	int				is_cyclic;
};
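
/*
 * In other words, a contract plays the role of a DMA descriptor: its
 * pending promises sit on the demands list, each one moves over to
 * completed_demands once a pchan has executed it, and everything is
 * freed together when the contract itself is freed (see
 * sun4i_dma_free_contract below).
 */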

struct sun4i_dma_dev {
	DECLARE_BITMAP(pchans_used, SUN4I_DMA_NR_MAX_CHANNELS);
	struct dma_device	slave;
	struct sun4i_dma_pchan	*pchans;
	struct sun4i_dma_vchan	*vchans;
	void __iomem		*base;
	struct clk		*clk;
	int			irq;
	spinlock_t		lock;
};

static struct sun4i_dma_dev *to_sun4i_dma_dev(struct dma_device *dev)
{
	return container_of(dev, struct sun4i_dma_dev, slave);
}

static struct sun4i_dma_vchan *to_sun4i_dma_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun4i_dma_vchan, vc.chan);
}

static struct sun4i_dma_contract *to_sun4i_dma_contract(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sun4i_dma_contract, vd);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static int convert_burst(u32 maxburst)
{
	if (maxburst > 8)
		return -EINVAL;

	/* 1 -> 0, 4 -> 1, 8 -> 2 */
	return (maxburst >> 2);
}

static int convert_buswidth(enum dma_slave_buswidth addr_width)
{
	if (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)
		return -EINVAL;

	/* 1 byte (8 bits) -> 0, 2 bytes (16 bits) -> 1, 4 bytes (32 bits) -> 2 */
	return (addr_width >> 1);
}
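
/*
 * Example (illustrative only): a slave config with maxburst = 8 and a
 * 4-byte bus width converts to 2 in both helpers above, which would
 * then be folded into a channel configuration word as
 * SUN4I_DMA_CFG_SRC_BURST_LENGTH(2) | SUN4I_DMA_CFG_SRC_DATA_WIDTH(2),
 * exactly as the generate_*_promise functions below do it.
 */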

static void sun4i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	vchan_free_chan_resources(&vchan->vc);
}

static struct sun4i_dma_pchan *find_and_use_pchan(struct sun4i_dma_dev *priv,
						  struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_pchan *pchan = NULL, *pchans = priv->pchans;
	unsigned long flags;
	int i, max;

	/*
	 * pchans 0 to SUN4I_NDMA_NR_MAX_CHANNELS - 1 are the normal
	 * ones, and the ones from SUN4I_NDMA_NR_MAX_CHANNELS onwards
	 * are the dedicated ones
	 */
	if (vchan->is_dedicated) {
		i = SUN4I_NDMA_NR_MAX_CHANNELS;
		max = SUN4I_DMA_NR_MAX_CHANNELS;
	} else {
		i = 0;
		max = SUN4I_NDMA_NR_MAX_CHANNELS;
	}

	spin_lock_irqsave(&priv->lock, flags);
	for_each_clear_bit_from(i, priv->pchans_used, max) {
		pchan = &pchans[i];
		pchan->vchan = vchan;
		set_bit(i, priv->pchans_used);
		break;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	return pchan;
}
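
/*
 * With the constants above this means a dedicated vchan scans bits
 * 8-15 of pchans_used while a normal one scans bits 0-7; if every bit
 * in the relevant half is already set, pchan stays NULL and the caller
 * ends up returning -EBUSY (see __execute_vchan_pending below).
 */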

static void release_pchan(struct sun4i_dma_dev *priv,
			  struct sun4i_dma_pchan *pchan)
{
	unsigned long flags;
	int nr = pchan - priv->pchans;

	spin_lock_irqsave(&priv->lock, flags);

	pchan->vchan = NULL;
	clear_bit(nr, priv->pchans_used);

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void configure_pchan(struct sun4i_dma_pchan *pchan,
			    struct sun4i_dma_promise *d)
{
	/*
	 * Configure addresses and misc parameters depending on type;
	 * SUN4I_DDMA has an extra field with timing parameters. The
	 * configuration register is written last, as it carries the
	 * SUN4I_DMA_CFG_LOADING bit that starts the transfer.
	 */
	if (pchan->is_dedicated) {
		writel_relaxed(d->src, pchan->base + SUN4I_DDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_DDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		writel_relaxed(d->para, pchan->base + SUN4I_DDMA_PARA_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_DDMA_CFG_REG);
	} else {
		writel_relaxed(d->src, pchan->base + SUN4I_NDMA_SRC_ADDR_REG);
		writel_relaxed(d->dst, pchan->base + SUN4I_NDMA_DST_ADDR_REG);
		writel_relaxed(d->len, pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
		writel_relaxed(d->cfg, pchan->base + SUN4I_NDMA_CFG_REG);
	}
}

static void set_pchan_interrupt(struct sun4i_dma_dev *priv,
				struct sun4i_dma_pchan *pchan,
				int half, int end)
{
	u32 reg;
	int pchan_number = pchan - priv->pchans;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	reg = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	if (half)
		reg |= BIT(pchan_number * 2);
	else
		reg &= ~BIT(pchan_number * 2);

	if (end)
		reg |= BIT(pchan_number * 2 + 1);
	else
		reg &= ~BIT(pchan_number * 2 + 1);

	writel_relaxed(reg, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);

	spin_unlock_irqrestore(&priv->lock, flags);
}
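
/*
 * The IRQ enable register therefore allocates two bits per pchan:
 * bit 2n enables the half-transfer interrupt of pchan n, and bit
 * 2n + 1 its end-of-transfer interrupt. For example, pchan 10 (the
 * third dedicated one) is controlled through bits 20 and 21.
 */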

/*
 * Execute pending operations on a vchan
 *
 * When given a vchan, this function will try to acquire a suitable
 * pchan and, if successful, will configure it to fulfill a promise
 * from the next pending contract.
 *
 * This function must be called with &vchan->vc.lock held.
 */
static int __execute_vchan_pending(struct sun4i_dma_dev *priv,
				   struct sun4i_dma_vchan *vchan)
{
	struct sun4i_dma_promise *promise = NULL;
	struct sun4i_dma_contract *contract = NULL;
	struct sun4i_dma_pchan *pchan;
	struct virt_dma_desc *vd;
	int ret;

	lockdep_assert_held(&vchan->vc.lock);

	/* We need a pchan to do anything, so secure one if available */
	pchan = find_and_use_pchan(priv, vchan);
	if (!pchan)
		return -EBUSY;

	/*
	 * Channel endpoints must not be repeated, so if this vchan
	 * has already submitted some work, we can't do anything else
	 */
	if (vchan->processing) {
		dev_dbg(chan2dev(&vchan->vc.chan),
			"already processing something on this endpoint\n");
		ret = -EBUSY;
		goto release_pchan;
	}

	do {
		/* Figure out which contract we're working with today */
		vd = vchan_next_desc(&vchan->vc);
		if (!vd) {
			dev_dbg(chan2dev(&vchan->vc.chan),
				"No pending contract found");
			ret = 0;
			goto release_pchan;
		}

		contract = to_sun4i_dma_contract(vd);
		if (list_empty(&contract->demands)) {
			/* The contract has been completed so mark it as such */
			list_del(&contract->vd.node);
			vchan_cookie_complete(&contract->vd);
			dev_dbg(chan2dev(&vchan->vc.chan),
				"Empty contract found and marked complete");
		}
	} while (list_empty(&contract->demands));

	/* Now find out what we need to do */
	promise = list_first_entry(&contract->demands,
				   struct sun4i_dma_promise, list);
	vchan->processing = promise;

	/* ... and make it reality */
	if (promise) {
		vchan->contract = contract;
		vchan->pchan = pchan;
		set_pchan_interrupt(priv, pchan, contract->is_cyclic, 1);
		configure_pchan(pchan, promise);
	}

	return 0;

release_pchan:
	release_pchan(priv, pchan);
	return ret;
}

static int sanitize_config(struct dma_slave_config *sconfig,
			   enum dma_transfer_direction direction)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		if ((sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->dst_maxburst)
			return -EINVAL;

		if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->src_addr_width = sconfig->dst_addr_width;

		if (!sconfig->src_maxburst)
			sconfig->src_maxburst = sconfig->dst_maxburst;

		break;

	case DMA_DEV_TO_MEM:
		if ((sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ||
		    !sconfig->src_maxburst)
			return -EINVAL;

		if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			sconfig->dst_addr_width = sconfig->src_addr_width;

		if (!sconfig->dst_maxburst)
			sconfig->dst_maxburst = sconfig->src_maxburst;

		break;
	default:
		return 0;
	}

	return 0;
}
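
/*
 * A quick example of the sanitizing above: for a DMA_MEM_TO_DEV
 * transfer where the client only filled in dst_addr_width = 4 and
 * dst_maxburst = 8, the device side is validated and the memory side
 * inherits the same width and burst, so both convert_* helpers end up
 * with usable values.
 */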

/*
 * Generate a promise, to be used in a normal DMA contract.
 *
 * A NDMA promise contains all the information required to program the
 * normal part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig,
		      enum dma_transfer_direction direction)
{
	struct sun4i_dma_promise *promise;
	int ret;

	ret = sanitize_config(sconfig, direction);
	if (ret)
		return NULL;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_NDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	dev_dbg(chan2dev(chan),
		"src burst %d, dst burst %d, src buswidth %d, dst buswidth %d",
		sconfig->src_maxburst, sconfig->dst_maxburst,
		sconfig->src_addr_width, sconfig->dst_addr_width);

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a promise, to be used in a dedicated DMA contract.
 *
 * A DDMA promise contains all the information required to program the
 * Dedicated part of the DMA Engine and get data copied. A non-executed
 * promise will live in the demands list on a contract. Once it has been
 * completed, it will be moved to the completed demands list for later freeing.
 * All linked promises will be freed when the corresponding contract is freed
 */
static struct sun4i_dma_promise *
generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
		      size_t len, struct dma_slave_config *sconfig)
{
	struct sun4i_dma_promise *promise;
	int ret;

	promise = kzalloc(sizeof(*promise), GFP_NOWAIT);
	if (!promise)
		return NULL;

	promise->src = src;
	promise->dst = dest;
	promise->len = len;
	promise->cfg = SUN4I_DMA_CFG_LOADING |
		SUN4I_DDMA_CFG_BYTE_COUNT_MODE_REMAIN;

	/* Source burst */
	ret = convert_burst(sconfig->src_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);

	/* Destination burst */
	ret = convert_burst(sconfig->dst_maxburst);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);

	/* Source bus width */
	ret = convert_buswidth(sconfig->src_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);

	/* Destination bus width */
	ret = convert_buswidth(sconfig->dst_addr_width);
	if (ret < 0)
		goto fail;
	promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);

	return promise;

fail:
	kfree(promise);
	return NULL;
}

/*
 * Generate a contract
 *
 * Contracts function as DMA descriptors. As our hardware does not support
 * linked lists, we need to implement SG via software. We use a contract
 * to hold all the pieces of the request and process them serially one
 * after another. Each piece is represented as a promise.
 */
static struct sun4i_dma_contract *generate_dma_contract(void)
{
	struct sun4i_dma_contract *contract;

	contract = kzalloc(sizeof(*contract), GFP_NOWAIT);
	if (!contract)
		return NULL;

	INIT_LIST_HEAD(&contract->demands);
	INIT_LIST_HEAD(&contract->completed_demands);

	return contract;
}

/*
 * Get next promise on a cyclic transfer
 *
 * Cyclic contracts contain a series of promises which are executed on a
 * loop. This function returns the next promise from a cyclic contract,
 * so it can be programmed into the hardware.
 */
static struct sun4i_dma_promise *
get_next_cyclic_promise(struct sun4i_dma_contract *contract)
{
	struct sun4i_dma_promise *promise;

	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (!promise) {
		list_splice_init(&contract->completed_demands,
				 &contract->demands);
		promise = list_first_entry(&contract->demands,
					   struct sun4i_dma_promise, list);
	}

	return promise;
}
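
/*
 * E.g. with two promises A and B, a cyclic contract is served as
 * A, B, A, B, ...: once demands runs empty, the completed list is
 * spliced back in and execution starts over from A.
 */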

/*
 * Free a contract and all its associated promises
 */
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
	struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
	struct sun4i_dma_promise *promise, *tmp;

	/* Free all the demands and completed demands */
	list_for_each_entry_safe(promise, tmp, &contract->demands, list)
		kfree(promise);

	list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
		kfree(promise);

	kfree(contract);
}

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			  dma_addr_t src, size_t len, unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	/*
	 * We can only do the copy to bus aligned addresses, so pick the
	 * widest bus width available to get decent performance. We also
	 * maximize the burst size for this same reason.
	 */
	sconfig->src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	sconfig->src_maxburst = 8;
	sconfig->dst_maxburst = 8;

	if (vchan->is_dedicated)
		promise = generate_ddma_promise(chan, src, dest, len, sconfig);
	else
		promise = generate_ndma_promise(chan, src, dest, len, sconfig,
						DMA_MEM_TO_MEM);

	if (!promise) {
		kfree(contract);
		return NULL;
	}

	/* Configure memcpy mode */
	if (vchan->is_dedicated) {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_DDMA_DRQ_TYPE_SDRAM);
	} else {
		promise->cfg |= SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) |
				SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM);
	}

	/* Fill the contract with our only promise */
	list_add_tail(&promise->list, &contract->demands);

	/* And add it to the vchan */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}
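
/*
 * From a client's point of view this is plain dmaengine usage; a
 * minimal sketch (error handling elided, channel name and buffer
 * variables hypothetical):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "my-chan");
 *	struct dma_async_tx_descriptor *tx =
 *		dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					  DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */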

static struct dma_async_tx_descriptor *
sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len,
			  size_t period_len, enum dma_transfer_direction dir,
			  unsigned long flags)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	dma_addr_t src, dest;
	u32 endpoints;
	int nr_periods, offset, plength, i;
	u8 ram_type, io_mode, linear_mode;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	contract->is_cyclic = 1;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV) {
		src = buf;
		dest = sconfig->dst_addr;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	} else {
		src = sconfig->src_addr;
		dest = buf;
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);
	}

	/*
	 * The engine can interrupt on half-transfer, so we can use
	 * this feature to make one promise span two periods and
	 * program the engine half as often as we otherwise would
	 * (keep in mind the hardware doesn't support linked lists).
	 *
	 * Say you have a set of periods (| marks the start/end, I for
	 * interrupt, P for programming the engine to do a new
	 * transfer), the easy but slow way would be to do
	 *
	 *  |---|---|---|---| (periods / promises)
	 *  P  I,P I,P I,P  I
	 *
	 * Using half transfer interrupts you can do
	 *
	 *  |-------|-------| (promises as configured on hw)
	 *  |---|---|---|---| (periods)
	 *  P   I    I,P  I  I
	 *
	 * Which requires half the engine programming for the same
	 * functionality.
	 */
	nr_periods = DIV_ROUND_UP(len / period_len, 2);
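	/*
	 * Note that despite the name, nr_periods above counts promises,
	 * each covering up to two periods: e.g. len = 8 * period_len
	 * yields four promises of 2 * period_len each, with the min()
	 * below clamping a final short promise when the period count
	 * is odd.
	 */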
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) for (i = 0; i < nr_periods; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) /* Calculate the offset in the buffer and the length needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) offset = i * period_len * 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) plength = min((len - offset), (period_len * 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) if (dir == DMA_MEM_TO_DEV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) src = buf + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) dest = buf + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /* Make the promise */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) if (vchan->is_dedicated)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) promise = generate_ddma_promise(chan, src, dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) plength, sconfig);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) promise = generate_ndma_promise(chan, src, dest,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) plength, sconfig, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) if (!promise) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) /* TODO: should we free everything? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) promise->cfg |= endpoints;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) /* Then add it to the contract */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) list_add_tail(&promise->list, &contract->demands);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) /* And add it to the vchan */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) }

static struct dma_async_tx_descriptor *
sun4i_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction dir,
			unsigned long flags, void *context)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun4i_dma_promise *promise;
	struct sun4i_dma_contract *contract;
	u8 ram_type, io_mode, linear_mode;
	struct scatterlist *sg;
	dma_addr_t srcaddr, dstaddr;
	u32 endpoints, para;
	int i;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(dir)) {
		dev_err(chan2dev(chan), "Invalid DMA direction\n");
		return NULL;
	}

	contract = generate_dma_contract();
	if (!contract)
		return NULL;

	if (vchan->is_dedicated) {
		io_mode = SUN4I_DDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM;
	} else {
		io_mode = SUN4I_NDMA_ADDR_MODE_IO;
		linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR;
		ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM;
	}

	if (dir == DMA_MEM_TO_DEV)
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(linear_mode);
	else
		endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) |
			    SUN4I_DMA_CFG_DST_ADDR_MODE(linear_mode) |
			    SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) |
			    SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode);

	for_each_sg(sgl, sg, sg_len, i) {
		/* Figure out addresses */
		if (dir == DMA_MEM_TO_DEV) {
			srcaddr = sg_dma_address(sg);
			dstaddr = sconfig->dst_addr;
		} else {
			srcaddr = sconfig->src_addr;
			dstaddr = sg_dma_address(sg);
		}

		/*
		 * These are the magic DMA engine timings that keep SPI going.
		 * I haven't seen any interface on DMAEngine to configure
		 * timings, and so far they seem to work for everything we
		 * support, so I've kept them here. I don't know if other
		 * devices need different timings because, as usual, we only
		 * have the "para" bitfield meanings, but no comment on what
		 * the values should be when doing a certain operation :|
		 */
		para = SUN4I_DDMA_MAGIC_SPI_PARAMETERS;
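		/*
		 * As far as the A10 manual goes, the PARA field packs
		 * source/destination wait cycles and data block sizes
		 * (see the SUN4I_DDMA_PARA_* helpers earlier in this
		 * file); the "magic" value just picks conservative
		 * timings that SPI is known to tolerate.
		 */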

		/* And make a suitable promise */
		if (vchan->is_dedicated)
			promise = generate_ddma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig);
		else
			promise = generate_ndma_promise(chan, srcaddr, dstaddr,
							sg_dma_len(sg),
							sconfig, dir);

		if (!promise) {
			/* Free the contract and any promises made so far */
			sun4i_dma_free_contract(&contract->vd);
			return NULL;
		}

		promise->cfg |= endpoints;
		promise->para = para;

		/* Then add it to the contract */
		list_add_tail(&promise->list, &contract->demands);
	}

	/*
	 * Once we've got all the promises ready, add the contract
	 * to the pending list on the vchan
	 */
	return vchan_tx_prep(&vchan->vc, &contract->vd, flags);
}

static int sun4i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	LIST_HEAD(head);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vchan_get_all_descriptors(&vchan->vc, &head);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/*
	 * Clearing the configuration register will halt the pchan. Interrupts
	 * may still trigger, so don't forget to disable them.
	 */
	if (pchan) {
		if (pchan->is_dedicated)
			writel(0, pchan->base + SUN4I_DDMA_CFG_REG);
		else
			writel(0, pchan->base + SUN4I_NDMA_CFG_REG);
		set_pchan_interrupt(priv, pchan, 0, 0);
		release_pchan(priv, pchan);
	}

	spin_lock_irqsave(&vchan->vc.lock, flags);
	/* Clear these so the vchan is usable again */
	vchan->processing = NULL;
	vchan->pchan = NULL;
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}

static int sun4i_dma_config(struct dma_chan *chan,
			    struct dma_slave_config *config)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);

	memcpy(&vchan->cfg, config, sizeof(*config));

	return 0;
}
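
/*
 * Illustrative client-side usage (not part of this driver; the FIFO
 * address is hypothetical): a peripheral driver would typically do
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * before preparing a slave_sg or cyclic transfer on this channel.
 */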

static struct dma_chan *sun4i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun4i_dma_dev *priv = ofdma->of_dma_data;
	struct sun4i_dma_vchan *vchan;
	struct dma_chan *chan;
	u8 is_dedicated = dma_spec->args[0];
	u8 endpoint = dma_spec->args[1];

	/* Check if type is Normal or Dedicated */
	if (is_dedicated != 0 && is_dedicated != 1)
		return NULL;

	/* Make sure the endpoint looks sane */
	if ((is_dedicated && endpoint >= SUN4I_DDMA_DRQ_TYPE_LIMIT) ||
	    (!is_dedicated && endpoint >= SUN4I_NDMA_DRQ_TYPE_LIMIT))
		return NULL;

	chan = dma_get_any_slave_channel(&priv->slave);
	if (!chan)
		return NULL;

	/* Assign the endpoint to the vchan */
	vchan = to_sun4i_dma_vchan(chan);
	vchan->is_dedicated = is_dedicated;
	vchan->endpoint = endpoint;

	return chan;
}
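
/*
 * Illustrative device tree usage (the endpoint number is hypothetical):
 * with two cells per specifier, the first selects normal (0) or
 * dedicated (1) DMA and the second gives the endpoint (DRQ) number:
 *
 *	dmas = <&dma 1 27>, <&dma 1 27>;
 *	dma-names = "rx", "tx";
 */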

static enum dma_status sun4i_dma_tx_status(struct dma_chan *chan,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	struct sun4i_dma_pchan *pchan = vchan->pchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (!state || (ret == DMA_COMPLETE))
		return ret;

	spin_lock_irqsave(&vchan->vc.lock, flags);
	vd = vchan_find_desc(&vchan->vc, cookie);
	if (!vd)
		goto exit;
	contract = to_sun4i_dma_contract(vd);

	list_for_each_entry(promise, &contract->demands, list)
		bytes += promise->len;

	/*
	 * The hardware is configured to return the remaining byte
	 * count. If possible, replace the first listed promise's full
	 * length with the number of bytes the engine actually has left
	 * to transfer.
	 */
	promise = list_first_entry_or_null(&contract->demands,
					   struct sun4i_dma_promise, list);
	if (promise && pchan) {
		bytes -= promise->len;
		if (pchan->is_dedicated)
			bytes += readl(pchan->base + SUN4I_DDMA_BYTE_COUNT_REG);
		else
			bytes += readl(pchan->base + SUN4I_NDMA_BYTE_COUNT_REG);
	}
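	/*
	 * Worked example (illustrative): three promises of 64 KiB still
	 * listed and the hardware reporting 16 KiB left on the running
	 * one gives a residue of 64 + 64 + 16 = 144 KiB.
	 */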

exit:
	dma_set_residue(state, bytes);
	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return ret;
}

static void sun4i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun4i_dma_dev *priv = to_sun4i_dma_dev(chan->device);
	struct sun4i_dma_vchan *vchan = to_sun4i_dma_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	/*
	 * If there are pending transactions for this vchan, push one of
	 * them into the engine to get the ball rolling.
	 */
	if (vchan_issue_pending(&vchan->vc))
		__execute_vchan_pending(priv, vchan);

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}

static irqreturn_t sun4i_dma_interrupt(int irq, void *dev_id)
{
	struct sun4i_dma_dev *priv = dev_id;
	struct sun4i_dma_pchan *pchans = priv->pchans, *pchan;
	struct sun4i_dma_vchan *vchan;
	struct sun4i_dma_contract *contract;
	struct sun4i_dma_promise *promise;
	unsigned long pendirq, irqs, disableirqs;
	int bit, i, free_room, allow_mitigation = 1;

	pendirq = readl_relaxed(priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

handle_pending:

	disableirqs = 0;
	free_room = 0;

	for_each_set_bit(bit, &pendirq, 32) {
		pchan = &pchans[bit >> 1];
		vchan = pchan->vchan;
		if (!vchan) /* a terminated channel may still interrupt */
			continue;
		contract = vchan->contract;

		/*
		 * Disable the IRQ and free the pchan if it's an end
		 * interrupt (odd bit)
		 */
		if (bit & 1) {
			spin_lock(&vchan->vc.lock);

			/*
			 * Move the promise into the completed list now that
			 * we're done with it
			 */
			list_move_tail(&vchan->processing->list,
				       &contract->completed_demands);

			/*
			 * Cyclic DMA transfers are special:
			 * - There's always something we can dispatch
			 * - We need to run the callback
			 * - Latency is very important, as this is used by audio
			 * We therefore just cycle through the list and dispatch
			 * whatever we have here, reusing the pchan. There's
			 * no need to go looking for more work after this.
			 *
			 * For non-cyclic transfers we need to look around,
			 * so we can program some more work, or notify the
			 * client that their transfers have been completed.
			 */
			if (contract->is_cyclic) {
				promise = get_next_cyclic_promise(contract);
				vchan->processing = promise;
				configure_pchan(pchan, promise);
				vchan_cyclic_callback(&contract->vd);
			} else {
				vchan->processing = NULL;
				vchan->pchan = NULL;

				free_room = 1;
				disableirqs |= BIT(bit);
				release_pchan(priv, pchan);
			}

			spin_unlock(&vchan->vc.lock);
		} else {
			/* Half done interrupt */
			if (contract->is_cyclic)
				vchan_cyclic_callback(&contract->vd);
			else
				disableirqs |= BIT(bit);
		}
	}

	/* Disable the IRQs for events we handled */
	spin_lock(&priv->lock);
	irqs = readl_relaxed(priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel_relaxed(irqs & ~disableirqs,
		       priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	spin_unlock(&priv->lock);

	/* Writing 1 to the pending field will clear the pending interrupt */
	writel_relaxed(pendirq, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	/*
	 * If a pchan was freed, we may be able to schedule something else,
	 * so have a look around
	 */
	if (free_room) {
		for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
			vchan = &priv->vchans[i];
			spin_lock(&vchan->vc.lock);
			__execute_vchan_pending(priv, vchan);
			spin_unlock(&vchan->vc.lock);
		}
	}

	/*
	 * Handle newer interrupts if some showed up, but only do it once
	 * to avoid looping for too long
	 */
	if (allow_mitigation) {
		pendirq = readl_relaxed(priv->base +
					SUN4I_DMA_IRQ_PENDING_STATUS_REG);
		if (pendirq) {
			allow_mitigation = 0;
			goto handle_pending;
		}
	}

	return IRQ_HANDLED;
}

static int sun4i_dma_probe(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv;
	int i, j, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock specified\n");
		return PTR_ERR(priv->clk);
	}

	platform_set_drvdata(pdev, priv);
	spin_lock_init(&priv->lock);

	dma_cap_zero(priv->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, priv->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, priv->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, priv->slave.cap_mask);
	dma_cap_set(DMA_SLAVE, priv->slave.cap_mask);

	INIT_LIST_HEAD(&priv->slave.channels);
	priv->slave.device_free_chan_resources = sun4i_dma_free_chan_resources;
	priv->slave.device_tx_status = sun4i_dma_tx_status;
	priv->slave.device_issue_pending = sun4i_dma_issue_pending;
	priv->slave.device_prep_slave_sg = sun4i_dma_prep_slave_sg;
	priv->slave.device_prep_dma_memcpy = sun4i_dma_prep_dma_memcpy;
	priv->slave.device_prep_dma_cyclic = sun4i_dma_prep_dma_cyclic;
	priv->slave.device_config = sun4i_dma_config;
	priv->slave.device_terminate_all = sun4i_dma_terminate_all;
	priv->slave.copy_align = 2;
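	/* copy_align is a log2 value, so 2 above means 4-byte aligned memcpy buffers */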
	priv->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
				      BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
				      BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	priv->slave.directions = BIT(DMA_DEV_TO_MEM) |
				 BIT(DMA_MEM_TO_DEV);
	priv->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	priv->slave.dev = &pdev->dev;

	priv->pchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_CHANNELS,
				    sizeof(struct sun4i_dma_pchan), GFP_KERNEL);
	priv->vchans = devm_kcalloc(&pdev->dev, SUN4I_DMA_NR_MAX_VCHANS,
				    sizeof(struct sun4i_dma_vchan), GFP_KERNEL);
	if (!priv->vchans || !priv->pchans)
		return -ENOMEM;

	/*
	 * [0..SUN4I_NDMA_NR_MAX_CHANNELS) are normal pchans, and
	 * [SUN4I_NDMA_NR_MAX_CHANNELS..SUN4I_DMA_NR_MAX_CHANNELS) are
	 * dedicated ones
	 */
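	/*
	 * On the A10 this works out to 8 normal plus 8 dedicated pchans
	 * (assuming the usual 8 + 8 channel counts), i.e. pchans[0..7]
	 * are normal and pchans[8..15] dedicated.
	 */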
	for (i = 0; i < SUN4I_NDMA_NR_MAX_CHANNELS; i++)
		priv->pchans[i].base = priv->base +
			SUN4I_NDMA_CHANNEL_REG_BASE(i);

	for (j = 0; i < SUN4I_DMA_NR_MAX_CHANNELS; i++, j++) {
		priv->pchans[i].base = priv->base +
			SUN4I_DDMA_CHANNEL_REG_BASE(j);
		priv->pchans[i].is_dedicated = 1;
	}

	for (i = 0; i < SUN4I_DMA_NR_MAX_VCHANS; i++) {
		struct sun4i_dma_vchan *vchan = &priv->vchans[i];

		spin_lock_init(&vchan->vc.lock);
		vchan->vc.desc_free = sun4i_dma_free_contract;
		vchan_init(&vchan->vc, &priv->slave);
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret) {
		dev_err(&pdev->dev, "Couldn't enable the clock\n");
		return ret;
	}

	/*
	 * Make sure the IRQs are all disabled and accounted for. The bootloader
	 * likes to leave these dirty
	 */
	writel(0, priv->base + SUN4I_DMA_IRQ_ENABLE_REG);
	writel(0xFFFFFFFF, priv->base + SUN4I_DMA_IRQ_PENDING_STATUS_REG);

	ret = devm_request_irq(&pdev->dev, priv->irq, sun4i_dma_interrupt,
			       0, dev_name(&pdev->dev), priv);
	if (ret) {
		dev_err(&pdev->dev, "Cannot request IRQ\n");
		goto err_clk_disable;
	}

	ret = dma_async_device_register(&priv->slave);
	if (ret) {
		dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, sun4i_dma_of_xlate,
					 priv);
	if (ret) {
		dev_err(&pdev->dev, "of_dma_controller_register failed\n");
		goto err_dma_unregister;
	}

	dev_dbg(&pdev->dev, "Successfully probed SUN4I_DMA\n");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&priv->slave);
err_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int sun4i_dma_remove(struct platform_device *pdev)
{
	struct sun4i_dma_dev *priv = platform_get_drvdata(pdev);

	/* Disable IRQ so no more work is scheduled */
	disable_irq(priv->irq);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&priv->slave);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id sun4i_dma_match[] = {
	{ .compatible = "allwinner,sun4i-a10-dma" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sun4i_dma_match);

static struct platform_driver sun4i_dma_driver = {
	.probe	= sun4i_dma_probe,
	.remove	= sun4i_dma_remove,
	.driver	= {
		.name		= "sun4i-dma",
		.of_match_table	= sun4i_dma_match,
	},
};

module_platform_driver(sun4i_dma_driver);

MODULE_DESCRIPTION("Allwinner A10 DMA Controller Driver");
MODULE_AUTHOR("Emilio López <emilio@elopez.com.ar>");
MODULE_LICENSE("GPL");